[Binary archive content removed: ustar tar archive containing var/home/core/zuul-output/logs/kubelet.log.gz (a gzip-compressed kubelet log from Zuul CI output). The compressed payload is binary data and is not recoverable as text.]
qvHMل&WWd4Jfٖ\R,83Mql_2{ MV[2|ɭ^}"ctDBDKOZt! ]5mot0ij&-Rb |P׭3`̏mʾj}vGߙ/|~T^,dMx値qnz:!\V?_y9rߛۋ ֚fh gQc-3+K.ZL>a$Ly!J%27(CpNrK|ڳ,sV* qAC@MLuF}Wbp*6,OزevO׫U{U rf%`b}IàI}Z7ԺҺY=Mx6 wf!l庰EnwMG=5XF0Lv>U=wN?PU{:HfY#oݻooEs}]n>zSn¿/lϒgJIyu|r33oq3׵t,d,%FɄ xzk,uy►<9K}c`wGuogbv8,ƭ҆ksRy=qzpD{OCW;0F F EHp!%] #b#A&%knTN/1Jڠ(Uʒ[fI%LJ$2q 42Uޙ6;SNx.J/ -Z?Xڲj0#yf׿ψ߀oG^Vvy_tܰ2eهd9c*JkN6y=gzD8 "Y>=0|>A ga,,Z%{M%$F ^%upPYYQO w@ M)(H<ϵJ1HQq 20TNo<}#}lV^y73bNtpo-vPGT+WqS-3iJXt~g򉁇J^UG x yU*9 C %u=t~{ ==4) -GbLpMgLIbf*O0ej =TAB1_lT!!8{fkdp&sƀ.A}rDIZHf{'}2J*bv5C!CIe7v>J|^5z8xtKV. pΥJr؍9䂃-eQ!00~HޘƂ b.hcʖV`^!g!X)1;O?8[/WoϮ8 u6*Mgg鏥Si2e ]M /ҶfF87q-Yco?q7o.Jطiz[܂AQ/ \2MWmӅ]KZ~4}K.ž́:\G!Ԝ]c?]5\C̾TV֥ҝܭ U:XukxC64uNoHv]o}賂m]Bnmn~s,)yՕ$R],~x͡ؾ^`Lp{׷ {i{>x+w齻lSᶋqodqIdpq/_9Xb1iUo+-k_QA\L x0\{'Q_j{!O2~2Oz ⇼Xmby<[Z*vѧk/Uⷌco{—Z뭋o>U-~o{}wq]Nj)J[[E뤣eB|i\>y)Nq}~l:~L}7,?Mޞ_oʧ9cs}<|~M_-OL1h79ͩwLS\2ZB8(XTXN'xĖ1[ɀTY#4mh/+و iQd/4hXl<||'+_)]$EH?{WF, .vp{ `AIYHvb[-;nI۶;N7]dOzT2LAE]b"S q8TH>kұ,A)H(Ě6-mjm(="R+8*}Cn[}if٤ _z{{Xp$8@>ZXѠO: G_8L6GiF|ܮn>͝ZA~Gק_|_&&O*v_v_XaW۾A}˒_ NxYKFP{!QޣS% X{0;`y&ym ;onxuh ?@gz ?41k2pz[%%dO"l04AO&$]s/Xp }%ct3.$ %e!Id R CߘD _#_zE+z&&\/Ӏri>{QLG HVʤ#>I ސZ@5#XsyiJ^J}!+0_b c!L`*$Bܛ85N 핎}^~ To)o~Y>g(,'Q; YK˿g|G3ev޴,S0jwoj/ˇ ģ )+jY~ӏ?\voK7h1J$tU^wsU+VKJ ͕3h sUvŘ*1芥RR*=o\͜ArBNFoFs6b7g0ɍ>9kƙ df$%VĪOzi4/<רIyy fsM6<auy5J̦i~lƛ77/jOchRzHss 봹h(/TaC*}jGDaKS2F;ok"撵Rzpp0C|c#T^<#VY ja Ya%*'NJ͈EEGZfJ6YYclQC}&jChvP/'%Ps* ekr״p+`y!soGWQxUN[x !;!9 RX 5'2$^)cJ7Yài > Bp&"pn|)> K6;iHBH1T!<ӧUς(A9P`=1gr 0;@A9١1+YzgG=kWfTj%|]I2RHֱE|*i.)2Q9 htvyޑA&W.AT4P`ySĦ"! }qRy'QQ)g W"h` P7EO+L4l P uVFDVIJIb-)rl<`8z1{a1q 뫎}dx8ގw:ˤu.` (r"PWC\msbJiz/򩞕O-oTňhʋ 9@ً.ewѭA=S+0r_r_=kA/ub0>'DE\i\+qްp* a8I<' Q@{-fYABR0HRFJATw08<$.$EQHBR:<N"@S2ds^SΐFs7&Q| Z NJxC+CAJR30l(FHhBN NIї8q*/Cl:>5Ov&},:úrW*'JlSfȂȒ}t}c/!>i |ة5> D$d*k ] D`gk< 6_ M B(FX"$@R!"‰/@@WB(}P7٪@%rBlO:u%EJJbaVDd&  #'Sw1 :G:6TɨM'۶ׅ߆Ša9s`T.1q@.41chT&}!cCpSʧfÕOƆ%uy(XRr`(hKHA 2b"Y%0Ĕ{}4gg;t>Z.O_f![F?5ntQntL-͋cXFb2Tqy)vXZ/žgکRj;d3X0؎Ӌ1>X| `D ;gљc HYPLz)/T 2zmPD+|!d(x[/%0z@WGr (={ʑ劉%^j%aIJU`=;6nlz\ZbCqvKn;O端vw7PՔq(r~O*.-U/gݫuvUBnm^%Y~kp*%^ n_K7қ̷TzUfȍV%.,Gn/j.򥳲Ck%7J]歸62mwnxps)ȴkްx54W_uwjWerwt OO-Ӛ6,%>R"7Sp ˃Ph;{p Z6h TQdIښI$'}ϏS!yn3hDbmvdYGL9 "Fc5wVtz* ׫K$" ef!Os]'PM}]&Ehfq *=;齉n.~ӳ[kϭk7E =9q:0%^&Yl>^ ;^:hogZ3A *F1;Z =j@^r]b6~E2γI:Vu:Zꙛ*;-9e"hV>+AN 2B4^SDoQ%kB):%1J+ [r@%&aP&Ύ;i9M?_+@+Ďݯe9dzU#uE&g!~Xx%|ʾ8%CFEA:{B'69 ,£X:N`0 ߎapdcB$|_ hLʱXc&(!kgB`Ɏ:{o@tQR.dQϿ>9r*&].Js"%hz6 z^Lqw[~{~ҏsFtCVwwm?%lhcG{&z {v~' +ꈚ4)Z4 gd Ɋ.]fIf0<8 Y}ҡ2wּ?LhF2e@)#E.r&,O/@:wt8xf+_PkV^X F_2%Ȑd@A{17J*Mhѡ>CCۄ*PԷgw I]ȞRF*)s܌'RF;#MM؈*M; %%nݫ VSכK-XtBd?sAX$$]) Q(U2wKȝ%>8pt@z~dR<ϋ ye8(Χ"u&7bZ[-x?:4^H/ޏ~Γ}otTd?հ!#߆cwHEq.+{.O^n-Xo3]z=F@Kw]!]GO9f*7+C/9 XYƌ-owVvM㯳nso庪t;5ĺ]S%Y#ob5<@ׁP|rM"7\s.P49Σ9γ|ψ4$޽eQigK pŬûFOv+ގ[:xߎ?Y oa1f51+{˸yupv<\_[ѻ2)+mH_v#}0olM^Y`w O1Eo.(?ʯ;Cxt\^:>Wӛ? ބ àP&N3L,!X3)]c9C` E1$ےHmg*q9Q&SKsB ^ oPSD}#w~ ᫿| y>GL̸gƗ{B\>ٱ`!<< G+#0mu㞻T̐1j&h#( m/{f[1-M* Nfx04_N4oy{qz³xi./7lSaa?t>jp:.הsa$YP&2ͺhfu|dK4m(ۢ'x>Cζ: ^W}p7qpx hWs*187=0Y/"'Jќ[cI"!t K-zr[mIO$ Rωco LSqvIJ(H&˘Pʍ"6ёaYqGkT)H!MtugIwS Oe!;=*l/>Ry$Ӈ %R1㾃j/X~/?g5O~-~6On21iY3JQne}~"$+9u]Wfxja hrpRRL92\PX*@[6|3K޲r'Iљg^) D=BAwc,+kYꯍIyoKOeaH{s/Qt8k3ִhIinVx|U}\6/ ur E䟽&V0#)6gF\:6;˜lC;҃adS@26nX#׾&0CSy~s&q^lQ%t{mC";eg/-))%ڇb+$VI. 
<{s%m?5{3Sn{9'@Yz|pd{Q?9-mt[XZhW }^lP#"{@`ǃ^YY@WyYX8W2:D#uc`bҹVpb#VZ]ߔuz!} %qyTH]cZ 1b"iZ+Iomfl YkxSQ\>Inc1i[66X[SL=RzW]V?W5fc}E"per*X=Lk 97jܐ#*H0#zTc)Á 4ޙB #138dx ZO)s5m1%퐟 0{:,gmVOZ-|n˪N&p'Bt=6 T2ZE(Iij(Ȳcq pfӘG"h@Q6hQ9b0XB'x*8B2 KpcY/wY߀I9.FOQ8gH%gr)f%MuRX4/:xy9x8C ۳٨FD@P=sLr2B:u(;I0&-( yҲNÖWKpQXUÂϊ/`qMy':+3~30# nG`]:̈{s5newRR^z; K PEг1 IꥰV3,uJOʝ/m"Y/,6bIkw)U2X ڷX9XVk,ForWG~8CU~j>@ȲU*LȒ3522EuekI04V Q0!S&a}2 BiB k Ι2u^#r4֝ Ôs>?G)ԓaiB]裺<[Drj^. 阮,+o4SX|$贏a1`jט#F X#!GJ<!ĺI/:|tEPÌu;"E RrHd `pXDI$0E*CV؏f &O5QY#T<0@jA"@Z> V&T-? C-MXHY&b 5Xi0 ~\F0ZAjVAsDp0`1` iQ-My>Jg8<-^Sgp9=I2?$ l9APeE)+@ .i(Q xv{u" a >( k܇tSVop?RG0FO)NǧƧў?ijǛ*ׇU{!˱ͱSg7۬*}=I~D0X ɅbNxld<|uz:*ףP& -,I.zA| unyH|10℁Tl~ ۋ N_T!FDć]O`dIbhq %,jqWﳕ1Uwgi{ -W^N./Ζ;:;se>w׽,._VO_'/Bu-Q]R:.~WKWu͐fx}3&z!A5P~z6EݫIY*A:d]ucㅐyHQj KNV1Kj>-wo(R<|Y*.g;v g߿z~߾:sLy@ 'u]Ph.[g?n޴M5 ͚mҴnrՄoW kvw aU` x.+q+BXmһ1k4; =q'SAh:r T AFBpC20Jr[ttȷoQ *5 7H$1>cE+()MIP2 ;Khӭ :_]J)zr.':Lt #uit6>ZLt&UD՝h ok{Vxahj yxptlvpz_kKt7J( sjW P"x"R-Mf)ub_fHPR5{i'7 ^j{pAxa9J:>HBC|х##Q# 7 cDTQ*8add;1TӀ0h,⒠5B<UX d;c-,`ZFL&Z ih8Xw6<>ב>X (Ei4'f}-P\ȬJɑf CRH@.8 $`p(˽)װ6֝.Ra|4&Q:n2>$dK$0օh.gjr@ _HwIcj*c|eum͛F`jK ^Ȍ ^`A ƜJiXwkltac. B½y&dt-/Xn뱫= z؆;r 8$=8@QePK*U90FOP&K!#a:0p]ItLSY\cڍqǶZ5Y[ >!GN3%V'1לHc \ A:,"ٻ6ndWXzIR#h\U凬SIK\*\-)R!8Nji E?g8h`?| 4ssN&񁇲 6 Դ=dd!*Ě,2%ǘy.&\'*[v1qک@e`<D,"(EWbA"1xϴGD#+GZo!A@,\xhJT}(8Pg) iTI3 oaD,&#j8uTkdS\qQ<܂u` SQ*1 BD1Ƞ0Fŝd..n4E~ho9rG ~g`300ApCvAܞS*JOFY/d]k@֝ U xHe>9QmQCg}u;37i6юjO`q ). v!ڄ^eF2Yr "=&b>9SS |Z#rl9pK^MWqS[Z,4r?Aĵ}94So_kj]<3®{{~˵Wzq|jBL S )0dg횒e/?_ag[^?ymGon?! x)îD0n'{f;i?H) Y(t@]n!Ď|<ę/Y.'N?~PaAl#NeOS:7|fOy>,9x3ͯ_=*_ ,PK뤹o;uR<9u>kUgTKҩC9B.">ugAg)n'ͨ\s}0έZZ೼)wbQb=n͵qF,X 2tjJ.<ދ zmep^G'f{us^)Wo#S# Iq`YJUBp}_=Z}٧Ţůg٦9^}Q_ֿj)u(uKr/?fpOE}|ٺp:MlzY5.2ƾW߿$Pzs6:9r5Dϟ'x"^Ǝ-^ٖkm_`$]z`xѽy@BProdŴ[*gHŸNzi) * . {+ B ri) BB r(PLt_$C:DRl.x5Bͼ?ۜ>x_NM&z+Iݺ;9JH/}}5j "Ϳue$*V|jkL>N^oxbE&'ۖiiIFڤJ֬k-Xc:yUKlD/I=PR8zL436rz'e|0vC맒ֵ(*?qZi|ZE`.0',Ӄ?C;m\(A%#֣Pfg I%!r<4S]xCC+:{601`6F%yPз#$MWVG)qGl7%XPvlڼ0j[Jȉ7\QEPeX~l0"[D\= XjH=~ a ƲHH75I* Aq2θPFQ{4 r OFbq:vjj\GYJK6EYe-.x-X) F1PT) chA&8brfm!7b-.iǦx ;yԧ58Ǜz|v5$O99SF1|\xG`Z~ܴ딟n!w)`WGw/"!%c2vFֻ娘2O`w.|gY ^q4r̗J3ŽJJSĉDx -"Z4(Ad P\>(T{ ]IYÙ k`fY Ju06E :n<1ho;|&8|J1|Uc߷ھiCB&9 !/{kAcY@ p*!1pmL!MHZC*kHx&G1/ TF|]&%iek@$VPdxx: ~t iy^T3p1 XyY S;yE; ke'K2d})]QqgdSFΗipo4>~u|,K9\R\N/]4.X5p_f.;?sm}LWra߁m^jtOra#a}<ocTk$VG'$:sn-x#[_+wWYc\oc:GCvx>~;by56^^^on/?@!HDwZwa?9Ǟ1 ow8{]ëՒ{}ݙܞ>zɕ^.'Aasan~u]Obl:Aq1o> ij LcK\Ñ'u3Fi>(t2-ٻ6$W uy;m̴c0򔸦HFT( ȢQU/#2_DY«6Ϋ`KdSgt5&n:Nwl7Cipi®K_D/hWwh2B*9`}g_2QZ21DhΜb[[?9&ׅq* 0=ĤfXRb`!``eS/lfhD\[/, "!C$XX%Y4ҥx1F_g쐯q0|y{(T ֻŽ ӽc=bH$c<8ZN 9JO2Veu(86wԥ }1 ߌUh6얮Yx+%p&䵢'sKʩCCfĀ @ZW˺Ϸtj%__4e -۲Cw{Y7K;0L={V=2wW{N=ηt]:4Y˼p:f{m]w77m6aǏ(wS$Ryno0d}#+߼2W1i WI4h'hP,^ CTFdߑ!$![n%ND|MyEՊEQ^vFjL0ODeqŕ?!('@\Hku%75.~C*#RHks!"yjD_qV[tL dޛ][ zi݈WFunGUM$>9\ mciӥ7)'9e5 O{," H#X+D0\\YmZ3Dj5̆tըh-M!*'I'˘fIXJƒ 1rຎjJ%b>|NB>`~&+UY-"=Mm pR$eBZ K5eMN1OL6Id4r`LQa:<53m>= sNKoDFKLH0MD42}Lu6gɎkƙ@Neϔ&, fIZ[U,DnII|Yޱu&Ύz2]3PdJm̊ 8dVH,EJh"[;f?{8ݧn1:KUB>5VS*m i+Dc=$)/cNЛZZa1O$UA&Y ̜R%XFHFhIf"K pURIXwwa(/RJ8yrɐUXEi0Jj) |mݭl+Ëu6Dn|c[e߮,IpppKb4VbLɒl+`գ;Xc}ЎFMCt9..糺hRVMV O Q}eH %$d"l29:?+W5ǶJc$qJ+4ؒQz13Ql2y~^|.2.mZ9qզ 4[KyLϠ{F8ŝɱ,'0:%yt c'VoTN&bqj|L.-Let%g8&*d.۱Qߙ8;mϼn6k5)=zxj4x.vכCͷ9#~et{tq{}ϖSEBv6&5'L aهdQUEFEL"{J p,M :HfMRD|93)cYxdq4X Ihg3B^$]ZdBBL"`ufZs&A99S1;OR8NPXЙ8;{W+=%wFawWVa؞ldDˡknj|~Ƨ% mg! ${Jʴ)D Zl9)qF'pWFe=t<8гzzyjȃ 0p Lq3"§DɎ>xhR#O'ȃ #GI=]9[ %Iiɒ0IqiQZߙeAQJYϛtvW mB}QԵg7l.dGR 4Y'|0f#TbuP#3f ԢߘQmpGOS"ƿo=cѝYcm. 
a<Si" O07D~d)Ӓ 9c??Ѡx\M''pN7w?q_^'ЗXŪ_J0]42l(EAx5YJ;MlML-GW] T<(E*\tV1jv r+Ғ*;GhQqxv N︜!KkKŰe[T^Mw>HQaI?/n C^հ~?bxWd:nTY3/.=.{;\frc Hnj6څ4xkmao^Fj{{A(ni)Rjcᴨ˩)?xAɷL'u_+x?->u6YN'_'n#~ U><¥d6aZ_F4/iM:$XA9jJu( $$'~Ig&,iIٖ.TrB  sԐs4urٗ;`[ws.Ntޅ$4h UEv>,tų̦yo$}#jL#=Z~)wW.;ו&&fn``J߱{Nޒm|뎋 `:6hpO4cm†Sn:6q7mzDkf^#uB m=v]Y[L@!X=Wn: |Мy?Z /=[Lpb9k߽-ꕊeYUd^LPrVe:Y6bgSYј9Ư^jkc+؈GW ]KαprpRJ3+exjP` \V<i9W(%뭫W!#+XGW(Pڻ^=\r=\Wє<& VkH[W(%U=\=C0K z&wq@kou7VN6UN#x-m(:qE>x QZu*;Fh1w'v{|GYKx! b2N2r @3~Kk|ݻcG(6/t 5bq4훼 混olx/Kf)w4ؒ(:nV 攮gR},o6ZVV۸Azm[ڒ!XDA@WŬ)"YRH[P9t Kk3ʳoqJsy8W7Uiӣ?F7cVz-Kũ/Etܪ2X8QDw"yd) 9 R$h/&dtKJ}Lr+,!,HHScDe%q)) B֥Hl,"+!d`$PO1&d潆&L3@Bܜlk"h%2׊F[\0r= uE`òzP*]7jf^ٻ6rcWXyI0.8V%y&'.. DjEz?E!) )y\%[  }F 3T"XcMBF%4Bb::ʼnN{rӝt':9pL>*#pRjE+reMQ/Db{ː@*1b eHч8rAL69̉.Ž+j1-F}J0_Z.~4E_~gZIOמ~AX D{)T$E1*Cf`NI2Jdo!PZ2$A@ʊZm8;|_vpo>Tjr9%q"Fӛa)'DX}}slLN[~gF5BR{j__.4Z hxeT2+J$Nd>oJ2:5*JEa\$ U-&Rmmʒ 2=mʤHs>m geUj-\Vz -|@.eǂ;.D|{g74rr8pUԠBB-Ğ 9x ͉{qd &CR)cP.K G835;Gibծ&Zz#؍d`A9L.A&9%.! 0!DAg 6+mHլ@ !bQ![E-Pب 8(&Hal-_)T,b5"QX23L1 dh!GCJF $"}JFID^":]ֆd@bgB 4:,H1`$"Dl gElI@vq zRZR]ԕbo7/}.3BgQʈtRZ9!HϓΆS,&)..=&C[|%-,wFn>mgnFF;G?)1f3:CL>3e>34ZQ ՟!;NQ=X^Ք{Uh#l dQ_DF8 0YR[Oii}aⶕmUx/4UbqԚ:_Q7dž oK%8)A|s%~M],,@Qb i LdW %!2'H4N `&ӑY3s}߾c(T*ߔR;:gxaxNT|Κy.C]OIxXuK7T|l?9cܻ*f{jzԎO1`:9ϣ.فzw N<ϬW=nuP2p%p<'`1%p!d .xdކ3A/qe&T !Je2{D.K MH4(oi,R* r@Xk[m8;rP$!ƹQF2$b8Nm^ƒ{V-.o1.%Zjw;-.%==2q3rxr\`9!DN¯N{υȒBɘg"nA:_u`+uϠM7~:YgS% )"+ Dd'25*`@oO{{>EG8Zb\zGYw1x-U͘t>F "Ә,:֥`JyAo=/,ziiE"DR*.] Qj4OtѪ!-% XGL&P+R]E4SZpAC{rv!SIC&z #a4PjLYZ=a6ǭ-wAKG b^Fmw7%[>/?#xK=NOmx6fA2??,T4hr#o,`/$&睎Rig}^y5?.(|}>_=fݳKomMJ`8qK˘8fgx' &S2WgxſV3+]>2c&Ld*n_~L6nQ[csv9i^Xyp[Ѿ?-7o^OW?jeJ1΄)AwgqE_?;'F.HEIqg;4{08>鿭_+߷^^?x  X6Jw;+h_Npi3R0!YtG>Jb HI:z NM N?^LڔZ2gQ9e9!I`%i 㗎Tm2$upqu5|%iVǣ~~ex=%V/Zʠ+w?<'i«m7`|CMGK0:)C|5Zޗ݀-iKx9$k-O_o*b&<дj`B qЩԟnZOm ]HB;zHZ{`"r9Q.q#ƨRJTNxGvA6fB :W<%8|4ηԻ|s&Ux&-nK"+1@BY36I rB_Ntf+oSR}4-HzӥNZ{|; ,dyM#b"dKv3(A@fzs}Y'QS O5:-x_y^Zydu!gЀbp.ifLb:9Eh(⸳YS˝@^(RR RP`j`q~FUHJF,i1yk7A>#V3>q@҉tQĔ|>x@JAXSʊ HԒYbv:9pvOx0H/r-(qY nR?qՓ KKj?&}OoZ*B\{tt$tdK:a察W?ahza%3+tCßd :hƠf@9iK}d5`heIn郐 ,t. 1/# #Ch2,w=+$Yp Ò\}IOpL tedXm8;Fޝ(RAu_ cfevKUx땟ơM!͵/@hsӥvrFà twCw_nfwons4-al ҝ ݹ(U{oo-ܹYn -n>[v̻xw;v[m- Ye{ 薢xŭƘ{~3*϶̭G@n֊#}(VY2/3Lmu8$]Jo.~y *HG7x׀ .,CfJC}'|;ON§ %VI:kgm8L,)9f9J!S`Oe/\F i@\E B]٨BdV{*0i+ g]>%➉D)"<Ͻ+7sT\[73S(DYuHWU`[30Ukzǫh~UDeXMF0.5NyAJ.Ȭ귄`KOH!E-De)bɘdKzqт Vĝ)I|{l`pIV(%6ȯspFrk٪5툲S\eih!tȴl CTv#.òBr@#O .6"0ХJV;)53 jܯb4׳sQQI*;Kو$V!QᥰO3xsE|DY)c9AhDBqLE. 
XJ杬gՆ^:U.Jb6 PKY A%2 Vwl`OIS7GP`,1H^b6!Z pd(B9bE̬u*u=ԪI=OTQ(M^kbtySx$-)e0^JCC8,dd1xewJI`RBfZ9]v C I+@1V#Bc+/Jj;zޕ,B /3;L0I >m!ёwjeGdؖf٬UuCaHdW3߲fF"֊j<ՋD94̐=BBPtfc ƖaZQȮ:=AI]3= Ue# RqpH&lHU U%Z[Ub\B&{z-__dGEuHx#Rj̪#WT7#AN^qԄq$.DeK0*B2x*:RVE8qzU\ZET>jqb"R&}%85K|`+2rX>śϊXIBı3s?#t>{YWafXVX,x]I†ndmh}RuSɩ˚eI%a$_2楡A: ,Kx .%%C$)G|\!f.aS)$%NsǺur(joFɔR8m0RLc1>AAE Notk%)Qsp%㲄f8й" KCM( 4JB`#r=eXaߡl.Vܗ[3JѤx5"C\)+.cTj$)U\3g6d^FcD2Y+냉KM-a$EVHwll8[*:kOF/EZǠoo.M=zn_'COV[{MLݳWg]W٫6JX AGI(%#Ѻ` 1O<`Y92KHDˇf G0HljVA u%MŹ62UpPB`o "D- Zv?%T8 y JGGIY,-,xGI4 G: g,K椽Cza*-6ViQ*]~ly2'}U*Y AXJ@U HG8m3$'Q1!c d 0RxO7g4,zZEOr1TGř'gYcNzc`@,Uںbls>ۋ]Ntd[g}~#Dݰ?X|ſ$~J x?}iEe-^2n]i$'%mBzJwśC+~OY׍^Wh2{d4>m{$T}z/xI?e}VC/WaMUi)KJ&+rd9(XA_Ro !r\b|hԥ $~S&ǫZs+-o \_qQpV)B,_ڛ}8IG51ӳF?aĿx@旭_o2Ӌ,oKjٺM9o1-*LٞШn_mwh6Tj=ƓF集{ؔnlI2AM佬3hcަDŽf5iER- .nM;ݚrV i]XOuhWP3s3OOn^xFynsJl ʷQ31p K$1΢4f2ycJhk@v]`@8I/ 2D 52S) &Jbe7B/QA6vLPEgw:kRڞC(D^iP؋l: ooqae,:uy?´y ,$^ҥVpb#VôׄioUK޺,:Fv =#;A0ɵ r B($<3c4WL#,8%i~ W] \ap}mC1 OgaX̠]Z"Ӯ`:.W$WԷݼ~> [XVݎÎUc=K>M˝P I/},-L+AK̸$XJ Wn>Cz))%k*ߧ|_/  xF׿M#*jHUՙSG/ȯfja*ÿhrqs|x ,5p}?mw 4rQh|QL #KwܯvYݧ_*sԫѓ ~ |YXtƒs8iz 68$/DO_{-WUW?עלyY /jq%r*y1 g/|Q^m'\n̞@h>>\,J;zsܸ3f n&pI+SV1Ǣ5&LVI9x0|pU U[&-KR.'DL$%X(UkzJsnD9MޜCړFFJksW$mw ]ggMmQ%q37ܟ+IpTmQ(X%>2D</k8f8"t[EcF1ZleFQFGS?`Pr;f^OFYl dy:]cYܠlX'6o33@_Bl TgX{vػz6ɴƎ$Y9S)(4tb8 tٜ[UYG-6`M)EJec"ލy1}lpiI5݀p#6t%UUf)uReX@7.򃷷׃9<գ 7՛/<ڷb~gBt`OHtX 涱&N[xvSldvtz>7ݜsk{MuݽZKImW>:&1?\c}ƁIV yը5b{7$Kϯ~wo, snգ >bϷٽ{u-6w:ޯUނ, h3Ǯ]U@xYnMwoyێWMQ?8I*BI`)%*-gc[u.R4h7¢B'ܫ 2,W!K=7uBKHDҰU֜uG1a:J1A^f鲩3Gڣvߺ&ZhP;/3켷[{|80M]}CYy[jJ|tK ~Rtt!@'i `āt1$BsKg!Q! fKVlC8ɖUD7E;ĝKAU]ăCqNC2`)B*i4!lF20p$g'zٛ5>=$䋡㙄߿zcjmǢ T: hJ`U,r.I*Q_K_]w޾=˻Y,@ϽUL? DuRĬ@}1AA @//|q$_\3rzѐgKKSL3#hz 訰AUPg @lMKdcRo%XYw[2D +-V lW> f. I%JJ#k&[ԢcM1(׺3671P,n^~|L9{oi6O$4$ PbNY9IپQ팫5dynI^IS>wvnlQyW`t8g-er,dlt윍r?渓;oshbUnﶴ|ekRMZRw\ {eC.;&@r6Y䢳dH`l}Pd)RSRm =}J JkXmMԚ0V R#clJX2s,4S,|Xx=vo IVr,I\Q/7pύ'a׵nk~9?S oZ4җۓǗ~Drf3U 鵔l%?mO%0 2^'fíY,ee3ẼߡCvB%xA 7\TG؎ט)oiƝMfly{='k,S; Zu :&hĄuZeCam5zm.B2E"!UjOc 8zӎ8*y!pv>K12rDIڄUJⶥP\fS@&ܤEiu-;pwW`ݍud6}Yli텖4jݬ&D,$>B5h~ %G S}"O|)|ؕ!O++T?[|2vݕnPTcSlwiK$vXl/a%z8KԪ T\>TMpZI SPV䔫ʃ>b'g+lf1k8A}_#(k+b Ձ5ʽjarhL>JEbuVU Y@˦(5\*871 ֡a1hZ|@2L/^*t` 0BwQ0.#!DDECbNjw,}:zZ﬿aBP"[!\z[cqPҥV9[|L;N1J t$vSNŔY(,i'\HX]pv?, Hc*AzL#m3n1O?~+Vqqu*}YO9u%M`-}~){YGpZ-T_&G@WZdP69&M!T %UM VYcڑ.VL[ʵlW>yz&W*2&͍AK jS5`1ێXuF݆R2gDP|޷{T68\N[^»7ݯ 7m~?br8 mnaȟ\[A^zzs8g-^_DƧhPUݛx|sƗU|vh֕dC{o7m.62-ߔ[^n?!˼sMsuKw.X d7<'oݗ4[07wƒpUC wId]D=TÀB6zi*hi"ODGSNsd?r&VFU LŊ;}G5UJ.&QpV ]/bZxƑ_ 6r0fp\@IT# ߯Jmwn[ DYUdi["66u&Ԙ-: *cSq b\&̟I(6}IuSpj,&4*"_y}8ϭ]F:Znm7S0SnKU#W^ŭ0.Yd^Vo`$·{: x:G2Zyˍ]Z]6˾~i:+Tf5RDZ6uhIF1HZGF5{pvlqN6(}{Qt 菳ev}kiӷΖㇻ6*&ަ ,boTcG5{D$ܨDz#FĚؗw>wO C :I@mʙH(l|JBKDLqF(8B<f:k'Q K(ҏ  2wFE]d(&v#"#2cghR#Nb"hxA"{ixESN{c<D82,:Qx=#,|ԛ1kFiGJya4]<~S68d[ % jo(_ šӒ p 'xcn/AvW_iTr ץLR KbY`L6뷕"0y6XESp5XW4Oj=j-A),}@ɭ/`I$xc& bdb\8Ev_g V$7EܢRTa\ը,?[-|0ZfqWt6nY3On4~Z_$8,7Y-ѨN["َ7Z:G w bC(4~GPjmip N:^~˨t||~^o}Sks&߿揇3t· nï?O_%$yIlFtY ИXı$x\HCGTҐ:6tG, }{5d;[[[Qnx9*["ifEI%ӛ8˛єvE^P'HǝqTst:3 5M[4Rkfmw'ſO |#c*w4Lɋ#C(j`nxkZv|`>}Ȑ6 Tkzs֙A95ȻIRso|}٩dhb:tvhrLGoxn͑(U4u"!yp$TKM˥TRP8/53YhYTZk r <9-7QĨ\C͍u ™k8"֎2HfH{`[u1wc}Oܚ [U>/hQ7؏[\r[.acw.5>ϖcYLYY~)&yz <[Z8w9`F25\F}_~h<.rP+0>^d K@Pϲ!F_-g|ٓb.<64#2&w4Hz}g 3ou<)g5ڌEu/맙7/ǫIY@)m0^7|w~y3|*HHy}ձzce_-ΧAaٷEKt ]\tW vBtutŭWڏMb5O//뀀1XN gPΚH%!g'?V> rƠssn4QxbCb)tp# 4/1&Y"0]AVJ$4`5G e Q_{0wk*ʓ i>^7*rv~B\*}d*C~ @/"OQ֋ξ(- uh_O3~ˣ j2E<#KH%STʄj ^,|'.n)*!) r%8*j6x嗯gsWѵ몶iM)ZgYe 2|޹A}hm,5l錥pygKD~K QJ[j'hiinԨ[-ד˻l8TR+(T<&'#ސH;s/FY|qfsS3X*?I2,ZVXZj:VG^r0m@iNÆf~zv`.neas~ii[Dyi%iT+1ZK7[-?: ^ea >#PpW3M!wJ,/|0++DɹMqAy N°0'epG? 
TB !b.ϻ˼UxȽ0Bo-^>?y}H mSrdsV-!{4:QvH0ωeqE`u :P`'%ņ;)";3ޢB=z2Wzɦ,X_͂Po|w7TU՗"Žtgy4Zmsei-S5MUNaU|ъZyU5sPukb [P9%ɀj] CocɭSJuJڱH +"JzoEݕC,<5Hހ(̥v "`nJgk,7­Wƭ%w Ԋṟ*/p^񖝆'{ӤjU}/s{U?paɴJXY45YB\jj|Zۇ&Zt3TƲVC*!OY5N iVJUE4KE UÛt\-N!ߑ^lL/[ʥ UWWtfG U>6cTd]BKv#9Ng켆E1bȡ8$G S|#C!ڣNA wk]U֘!/#ҏo7>yݰZHR,{M+DizJ)ݡ+dg jBF=] ]ieL l`?LStp֯ "Jaz:A21!;DW8Wխ )ҕJxmR06d& FGv0Px,C޳tFZ4}4m53״隻NtvMwt}h㕉Ur-wKe*v2$+_dsIKf@-81s?cKp^ocom(M4մTv.DCtutePDMOJ@LM(6?ٿOBς\/713t3 S[9/Qh:%7ė`5Kpwc ?,:] ['^"4Z^QAx8\|v]@η[:RIHo[Kye櫊8VW:q-|x~_\8&Ր D)Mce*|ʩRY"J)93S\J>?_ |kNWFSW[:oafރ퐽"eS0xqU,/M_=%gL-eLU\⃗ny$3#ɵ^V8xGkXf n~fV18ߜߚqٜ;B@”T7m~XǹP;OH,S'e1,'"Mwi)x$MLt|rZ6 (SJE婮1o3Y}{sޱ1 y X&n,4vdXdnW{b}CeJ-3eۈ?zpb0h&2@t2kۨ|S)19 ǃ?*0iRK6hC|&sue:xdǫdT^Je׵g^%.gl˨cytpH.j;,.)C~|/_Kp_ߋ/? )Ru4'݅>鿾M{٪ino4:MӾ娗]#T!O]]^0+K_JPdCTo/:x^?#'HKpH EFLjhZ A46[RY;dpH_ٰ2.]=陸~枎֩5?_VJ2 "IJUɥ `Kv! {:Mtf4<F|L3 {;r$[/?,446o4o6 MOs=);w]xU a8ۇR{ C's¸N18NT D^C=JF=.hSВ|`}3_|5#R4ږo]Ӆ0x`b~f HtjfAuD9dՙ, H=/ޑdG$I`J(M%Cf>v_=lL^$ٲ 2ԓ#wĘS" EmX %hlӘ$7Ζy%}b=ٗ͌Dr i &O*R(h)r$΅P(cbV˳0mYP3a (>.iY D*0sVXQugқJ fL$\}Ii&';&Ϩ3Kg~;w ;M7쨫29kkTtȸO-uZ60>,iGbhN k i3[ 2ug #Kaa%6!ʮ,e# ) Tkج;-c;6]m!6xOj {T6x zqm\>`v5Klx7ta<~߸:&Ա("$fO ̽ 6YA#8]άâx* l"7ʨlg[ (Oo"uwf[ugŎI^bIǮV[5`!g%++`Qe*;bKD6I@oBQ@lTn`6Ui!32YtBlk2d%j8b2j՟Ɇ6f{ؓUS5bǮQ7`A/A9%pKtS> EE(AcR&Z9Gj|ԃwRcsc /JVAv&ȽJ-K4( VTZj֝-RfO_ud8ʵWVR]49Ń]|4 ZPeQJ)"N2c#8'{S8tj]c{F먰5]Fnu`osd- яn-sU_dY%_}PgRg7h`ԁ) ֆNN<B>qz!5yzλ{k&Q}& R%cV39RAɥ'MSddG?swZH\QZ+xr~`A;սZ|rz n,Sb^9'rNDVۜupQ -I!C^J{רkYe^/pH':~M/S}oOUJazu;r]puڼejn#69;#\aR#"+>w^EG-S2U={{A4e:^% $Bz'c'ފZf6gRR6V+8B$E/@K~0?uSl4_;dwΠMt`q ~uQ3j 11O.0|NZG4,S2D (6ܕCVGr!pX'a!+ur}I/mϹ `,$y]`#k@∬3&౱5Ζz5}9ȥet)Y¤#ZNuG2+U<ڵȑ 4P$4[gcҵ$)dcrXJ1*U1̊E8uBPRJ0%V3O:;kT I)p$gDZR@hxp41{uFr3FHxԣCd1QQ)FךZA`?1ת X@mk]^PWE'﨑|jz yd=j|x2LY (iNwӶSug,]}7 ޕ$ٿR]˝AX`nc=L{SLjF((H2`JVE2"32V;X EbٹlwYFN:.)W(_I  S@w`ăN)`@ 4:HOeNV(r (,~uȎT$ ~Ytx$pm5B5l W e1uy.''5z ?[jG X(4JpAU/CΩ뾩SvŇt~/٘^$SKC- 0e; H+CΫ6~Ƌdݡ+Id"h)Adq%x9Rp^t_?E;-N\SXʥJ/U]d!Fq٨bHd $cF81py9x>(DnD'-| |EL12F+=11o)q611&mk4qw2!o|&if(d/scB7wNH:Z\8f€I$y}߽y\WNRJdR;g͌ л{UWY؉/Uг̶z6_ 8`JUKp6 ^֗9tpF^4guK½[Z-޾ wjwNCoMha%8g c.%q|/tZR3<26lV"10-yxrjQzy*S GHt"Ls,Cc,8U-|أ!?uag/[Ow<ts K!76O+/mOd/@ 2.t crVz9;(s܍Y=JPr6c75'/ Y+%7pyufoES|E=zevbja|14՟v}M:zoYliQd*ϭ[Jܴh﹔xߜ1myerro+O? 8`-vKaH"(=9G~Itgqi`"uV֘贶ZI;9V(r#~4%ac6RcA @ * $\', } F9JFRT<7Mؔ8[6pN/J}YX+fV:#-C4;3 3֫l;|ՈGW7Vٰ/mhM F,w<1hB2TghGn^W-Q\MN2jm]cчYcϚukjorڠy#!NXHcQH6f< 漴"x%XkjR,~f1Njɼf~\j ȋp8 l9U$dgcYs4AeQG5{DDnT$/$(4"p,E Ot̚6sf3RH?`R`%$)G{n6q;;?z.<K>n0< ;%mc=02{Jʴ)9D Zl9)qF'pWF(;ixig-t!2qC`uT>M%LvL(C!25yA4]?Y] 6ᑑZo9/RZprK8I!$&Kf$>JƥuFi}cZIF+ejZm& 4^FQӞݙ[yHB6d/5u|f$sPnBK*17fvnG$gTܺSCN+t'tkO ap֟NQv?(>Ngx(WY Cd%oBGΘ+^OG8{A[߿yݫ~e5І*V@P颞~f􇕈"^,H1"n<ø1tIw5 /.Wͅ8v .qXnV? muV[Ͽx꣄;f Q*_(9T11=Hњ/Sb8INw>+kǟFEe]?{UOUmvw_n GL]z~3MO LѩfZҘ JAB;NrC_g{x&oRʒԙX mK%,B@@x!WN#jotrb7=٥`[usOtޅO04߄*?GUhʏ"/c \G5rӿ =#Z~;9; ߛNr x2̓4L=PawZ{|\>hpO_ܳH15 չ- ,ev[?lԧ}ﻯ[?c٥Փnu衫Cug|vgۢLJrP1[ah6'i\Jf.xqqۢkf~|dQIJjhk'dh";@%!S `p&hYh+m&%܃u1Bwz@6mZ)InHo=$=Icj+AvIy5ۖwwi_uS*4 ibV^oC#W(߽Ʊg}~i0YzBw_!^g]^ad]ޖ^Z\-:լ0V&+w7'z$Kp ;ʪ gVi"!my?FXrH\&*r)ޖI`^'Y]ߕus`jJm\/IZ!:2D]%c{EL9S)2FXBslfl#;0Z?' )n]_f?_`1~ywuv[H\B]8M'ۊ׮|K=g xDq#Yi鍀 "M6h$1+(zRIv1!Y(p41s㢅?}\lpv=eH7vbqBWIC,trZ'a<:$H8s73 `Qd@ ]|]Lц'2{URu%.Ijg>QM`&'G5!1:(U5sjg}zdy͉WcvbFlޯ<,),y.1?LLt/5^#(b*qHV ³Ke0|fh nswA?OGi7q>`{z7iݯu\fMЋJ2E8Hoǀ.Uq.o7sٞ=*PN> mZ˭/;[55YϗrY jogom{wb-fk}t4/(^}S̠z:KRd^c^!~Qh,eʠ3-~烻i\ZZW(%W !'WXx fDP\FW(-WW(0\=AI[W 5pjBi9?vB)% •R["luji-9+vkWOp-JP}O^H •f"ҌSZW(.#m+V3R*+Ô mZBm{vA\X[ G3Rj+ˌemrQ`Ӟ+";ex* dՓ+i]ЕS\`z(UWZ Bbg5<ad 'y+y1׊هΩ#KJb z/,%Āvvtr֯*J^ E txT :c. 
= 5RJ%"r2TX"Jζ]*VV[Eپb?ެ\Nb G|g  5ՊA9rH}L?'woqDO;VORcCe;O!]O>n\ʶP\W 䤃'Wk+uqsMl[ \=E&-㭁+ӶJ+J)UWO` | 5-p%#+ \I!m\RP\+W b 'WJqW%+J+JXWO4匵i V d@quk+c+Қ\x4qWE]EԔ5ϋ˳~k H M|43ܛ:vUY ]E~~Ei̗erpj9_OPp_+ )_aê-I S- }pluo63{/6Jk/^ǫsEe>JnX[~:\=jp3 yOF!W-wq,5/F8 I ⢟2!SKr-kDY˲K>ujON 'Gy"E}~@ç8 E͏s3{.G+w?fc0qzb{!G~JI=#lqqtmf%V^WY%c+8// #py}|⼷3r_7'TLǧz 7#T÷D=*);kZS鬜%QK\A ׃[ ɔ\HnUK%htZ^g\ȥ4S5|ToFPatlE#V쇅4QC5Jstj! X&Ut(Qom΄1h)E H~I;"DKԩQU FD]40MQr9C1~-6ڔ,7۷WTs4sc}Ni53\dlr=Pr]S3RœDhY{n3b.;Lf ИU7T3F+PtRU=QKʇ<wDF0S"s^Y{inmS1Jt@=hÍ`[Ɣ%cM!:F20T`9MB11ИU6F 26{WߨGJ<D^WT|_;ɵ*6YtT:DyKP[P)}|ޜyVUT/i>s<:RIͺaհ)=Hn$]Q^wF$.q^&΄|`5 j}ITp,ֺ[ݰTYOkh^VIC2j(UÆV9[0f %xD͂U.E{V+= RAvT5JZGviE餍](P!D`%R(- O9@EE; 0O#4/ h;oPR6,J2Tbw%.A2hE ǖژ[Ck+.t-a6tGV1- YZec`T򙱕͘` HJƬG6lm9ԠMEX m {Ze3@PX|/H1Ű$؁~oktufWHiRTWLHpm MiI +QY[4 U TW7"j,8H&`.mO J+Y1w|э!ՠPw^KCp2P(SP|"(^0,JLhW4 ~DUuASPbt,,x/:M;*q b LN QCF:|\6 d&/sAGM-V│D]`#)#͂EUPhϒ (Ez@?PiW ()X|A*AYrv{YdV%׽6yQ,f^$W $$ eFjuV":J(k! e 1P #qҰI&}EՊX{Buf; -=ڛ^,KT;f1*Tei5 !pBK.}?<`7zruqϧvy'x] ިa}Qu`mFL`-$ >:gAuPyP\J_6#ߕ&*fFVW 2r i(yG a (hQ( {By[ɐ$RQd"5B5 C0U]XTBNu~d}:MOU;:@vTVB]+!Hb!a|{g''y]D]eb:\yS0+tmi,#z  rIn ȋE|'kPKB*q`(#(vPA~,hpⶦ3*Z.fX;6@D]C:x Xb[T30be@0 9qVH bl[N,UhE =i  j@eV3 o=JP)FC;~T*"1 2ߢn$l& -/p5,A ?r7ǩ ]el8X? nzyxNi/N zLŚA.nn8c`3 =k(اn ;Z,Fvݚg9&mF(γF9@[Fm1PN2zr~4\}EeF٤#AI'? 9k9lzBg 378w(]tR"TP=`C(uAAJ@25di3 A)>֡[6q1옍}u`?nw+AZ964NB0C_&o Q0 bp\ 1\U -Nu#guK[sO*68&գ-19mp@୭EѼR'5֢Y u(P5jI:Lj='е-9V#/{*6xu|k΁ >bѫ=ڡ*`)o"a2نf@2~.O 4%鰀l*AنFp*qK^RZG7$ЭuE< \T*̓ .Fr\Umo1˱XTZIPpԅ.qf@Bׄru3B?]wԢ`| c~/tݮ-ggu (aՉSV3\#`oGqի'/ 4tKLsZ|v~ӆp >ӻ_hYUTz W|O#pOJp W)bÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J W_pYM9 h!L*n^Ip%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕp%+1\J WbÕeѝ_6߼:ꭋz?vް@XmMX?Bfa50 aHڈ?9>i5tVs~ xb ]=Gr(o {lnqe#f6߭ݵ"v7`uܑGnIͫvs~vU z?&m&6;R/r4Vx釷+[<>6Tm}sZ{θ9WdW l10ۜbW~{uy]>SˠMi2>DVf'J8/ޟVη7᧽|}!ğyÿ~MjRdblTT{z.͇é?3 _{&࿮nZ<(͟n ZtC^)[ ]1BW@+Fyp+C]]0jZ ]1Z³:b!m _7|tҴ )B4*4no{4аXc z:0 k)0LmJTc4ħvoFٍN뛺1QUi*$LgާL =~83yu+wzϐ}gXgن_&v T]ϾϷ]oNJ9ƉH)_vwo.tgmy?GAw#.Kd0)HɎ )CRTKb1 iTWUzoС ~-7O _]“eŸRLıQro)ǗFmWY$Zսl`n6,&3l Z.<"q>R&*w lF"T >T/t]rӕ*Y1'&'^2[RTH%FN46 VwPÙI_o;_?ά6uP;r2Z^g] bZ ڰ/ hXfQ6eo$a, 2rm*-\lYW5GݺX>ɹ~TSVy&F0Xn-x\:xfO"Ic;T&?:>Ty8N3T \J-lb ;r`95@u=mxOEK~d $e SӔblܢHcJ#a؂HDR"3"lG%2t)B|$(/faraac3\nؕc 3'm:\-lʮ\$bpv@ fvO׵>tQ :I|>Evwz毀 q1av_Zкj]Tww9tf7 ,;,jn=]{u׻r7?&5whhOlOq?_Ϻ=ש˪52o:9X~tw\ƛ^yL%d[_6ݪ &^զNw|{:mBֆ;\6sK {FnukMϭ͟7gLo囗phj> gW,Ip$V;K$1LI 'NNIg'䤳pҵӖ#Ҋ^8 7RjDH5|Q@{%۪@n6'=GNz6rؾ)98}q1mx{wU|Y@zBn S_;0:6ALW M~w#X=ԻӞ<QIOҝ8Kyr({ iL1(*l,bP`RBs# B RL8j$'%Lx%[/OZVZr ]=8 b# TE-ZjdHKdPkI0Q0V:d I *P j4!13 φF~8~kmrϘ3/l$BrضF_)ʹ<}G؅Jwl3r{G(:z_wKr=8mI/1 bX$xqĽ(M6נ`AM'$)$3bVyԂ5R.[ip"H&104 F%B"D)59!WQ 6Pd4,g9PJYɨ] CBUkQpuR)hz6E0b{Ƣ2!o^.O=V'UBEiQbesjKk$)vawш: jŨ:?i%y8+ww\]9‰C ! 
frdAH # ;HXDJ%(),c=FAMo,b86M)1G``Xu;6VH.R3)%BR4w,ƶCQ{0rH4_iPj*(r &<@Rx#QY>a/(r0Ԅ` C`bilDBOnys#-l}Xum_V3ѻ.帴4{6֝⚾mQ~e?gC jD )RV\:Ǩ$)ցK9D|;-B0#BXYL^jʈhA ## tlgM>}.3Hnhh[ En7T~9kH4[>NՃΙ..>\sQS,FHYh]PRgJxT,ZPNRiO` 'Zב%MTpPB`oapeA˼XH>UchpNJ7bi1g;Ja8Y1d#wNl,^ɻJ&穻ZONQD]?`ǠƗ-_iu6tY`.xN,sHr2A@F ɣx&wUtit g&%2rZz)bq&ࠢ!Iu1u'IpݸQMNmSgO9R)/Qr9Ĺ*rp9\%>ԟd(})סE?ru'P۳6՛'2^ʡē׍J=s5TGř䘧Lc5N~c`@,UںblsSwW"""}~~["Dx8j> ,Jz.qr9ׯ{_dP"a FrR&(2^ GYb'}7H -wbr_|/ "*_i)m#Wa|Qz^U鵱ތFrrf=ぁi$xtpa9eВ27fEd7cp,Ei7l-,/{/'U[}kBAL6OIҗ.ʸb2}3*[ӕ/Y2c _iuxĽz}$={*{sC0f4F@:ߗI~za>'~IA'_1_T~.Uǻld9srտ0_ߧO>TODzܟWՍɬ?Y_eF䧰?LJe4EǒI +rNk(2h?FyB(_0ua 9@@Ay5I]R k{N:W-/ʀbtsn:kbc$W?)|6ehmqH_Zff>d2o1AeaoxUd_UXK0|;21s&vo7ÀԊ:sii}Ton-LA%( Ä1y&ua侏H7]ܙw.l,&$"rtCǮ3X8ߏ`@jp#WGwoz#P۠Q= 2(PR,%9+5rV`W~ `,6 ^Gm+մl@[ոեۯKګ:wޕ*y@󢚻J#wx^)><7*gYE")xwhؔ(a+TELee)P>y.oE.֊$z: 9W ,cN,\ ʚ>H`mJrӪ>v+^M,f3ISS'J}Tث]>S8g=nTW O?6:Kҧ f:PyVx%SrZƤu> X:i[:m+X*{0Erws DFg* BbKeiyP#%%0x")E| {F3Yp*ju6&ΎBvhQwC 5&c@f>S JA}΁1RZ%A2լ{i+U+lCYSLN^DHH0f&4lPeNeɴxhzcEӓ}6m.?#wCɢ:?trVGn7_?.(9]^JbJ} vÔ -MGUx%vZ!V9Ơ2Z$ET(2*bJ1fYUJ* F꘭ѥhrFST\\RM#cclGJcXؘf싅0B?Xfu$0 U1$;0 m^wʍFt#A2`.If4)Ng ˡAix@F(x/@j¨:onjmBrC,hRGSlGl?5-ݘv0jc=OYf2FG+vѥtZsLx2$|,Ri25#9L̐#y&%0Aq$+Ě9I76&v<#ۂǾ(FD#bF CBFC0HÔatB-S}J a!%("ZS7N۬4Q3>z.QC2!'-PCJiچ1q#b}qu6%jU=.ާ8\DФs.V&<讉 mƴc_<4 -g=ȭN,wnKF1/Ӳ= ~|G- -&@vnqWtnqVnqRucba5͟sgTn*6jC&*{d U²T!P9]ŔT`l>o0yR;o[Bm%Ky۾c% Y$:9(Ĭ1+鹊9;U_ \,qubC.aZm>m_7';ӕhl#,2Y46FLӽlcxx(nԈ;#nLFb]i^/uqJ"x-ǿP-ϧ6gX>KgoCmOj83h|7ˉQ f `0TT1[1,I펋S=xxUŇ{˔Ö=9DAbd9L|iԚ,j8@&VՓŦdgGa- il-YH2$h낊|nn+r35wKhrvxIg]1kϣ+g&S4)$_oͳ>(0[pTL:}Z5m/YgP1RLq!7: '4`\E$GbVX+)Yy2z?`;U'+HA`R4Ho;C26zzZ zW'Qq] :lʣ2 PZbiѬ!rM%,E4OL7f imHA(IQy Kw^&瀗3Բ Y'91D\<޽ IHF$y 8჉Ťܔpo["9u6zƿZ' -%[=_ GqZ\K<l0y_U.yޕM/\Hߜ F_*S'HىBcR[Qe'%u4s2o8~KEt]q}W߁J$jZ2rOJ N̑ vtptG34_zsKޥRRIw+^mN(ujj>q1Łz_̎ߥ WC[}Gھ[x^''dz*0LJƙ%(t5|]G3aյ^oKUkrW%7}Z=^~k8i~W^;!+aq خlW:o54؍-M͈ͬ+k2ژƓ5i(n1>,zz|8sZolU[]vrSF2-Ұ$2G_M 1?"<CPE:a /ޒ:~Ept_~xU~~_>œ?|OW4mD0"0鿽M[T޲i0KӶ^/|{a]_?mwX!*Pfn녗L$<ߔ@}Ύjq#^h}$Sp)֥U{YB4JV.fRiN"JDy>cRVR҃e5Bn ;s1WmW&_idMgfW)wҔNYD4\~]inՠlHܹsv&m{Ro4:%+ÔAEtUDgde!ܑ1{e=ٳ> e!q $$e\5! \CP)(& ׇ{;J /'_ٱ \n\v[Ro>DxQ>v3.(TiB1yN2DK|=/VT ZzTkKCR^h]Z[}^,GN]_=$͊(#Y=4&R _ο#f|hDN?? 9r4b;_3vԚv2zcmU#P"#jECEpV=U{TXCpU~uʔ;sVZ[+R*•ULksZKXO nӅ*W'kY uȼrJ ' l~kY|GVWkqq)PwXaA7yw1/OqQ5mOK+ 8uvMrN~[/uj/L:CwyJ+Uu)cwjq;iMR>;ˡ 㫇#fympY[|WrV'ϓRˁπ+jW/;W$0 \q \ii;\EWi.d pSpUE \pU3+䤟CpEkН"]"i=\)mϮ#\I+53pUE"iUs+UjCv)ԮJ ;WE\]"Tm"=\=CҢv\] \qMgB$-ևڋk\A;W$` qEgbWEZzgHeW,0+* \rkuB؇ڟ%\)f2mZ\$ 8n^.;؁,-ġ;Oa0?pFG"xHgRdOלT2#Vg:{Xj<[ZX&BEnhjvy1ʟѯWKE4A{l LZP*:*bJ tcAo֬-oovYe-_U☫w=nZ|? wܦ@36]˞$7wCI~GZcKuH;![nu '1bePeU(W~ʂ?ɱF5Qs-9˝R 3 *v(eRܓۋ`OE'*ϖ(ZaJsO7A(%0䨷M5 N #X:`?r $6>{h}jC -6m'2ߏ\M/ZԹUpኀwJʫW\&/;\%*nIU:e14è?ೱ-fI0f{sc&o,= ዶ ?ӯ{H]_WV 3LM9f peZQ#BA"#=BiӜF,GH=_8tcrFֺԆ=ʿ/|<*Hx=pꤊ%K7ep6i*(H!dվ}۷PSo3{2Gxۤr_ϪeMp \:,*,@{W#_"Qz F,}*4Vp-tf=olPR%Q^[Ze[w#]ݳdSҷy/)5B͆ʭU:x( =qݞbl`-4TVk%lڤuEoF` "Xq;$D!eu40b,Y%Ђj53Ӭ}'|Lrn:#7oA҅,zpX`T9,DDꥦ0+LDz֞ďi\RЬeEk\*R)rR5)D} LsR2?7ΆC0kVߌU6N^gc}hZ t^ &{ߙ|:)lRnu8ӳn6MN5@a2;EpU,2|s(ן\3TX AGIXOuA KIqhpH.2v7P0D6d/5IBwy1r;Ǧ^0?3ar9.Y>ATg]\^ub}p`vu`=d }1\alًHqr#9 xSB_eA7q6IlF)d_@?v~Mo_$@FT0"ZTl80,W+nKen!`s0@* HyZЍ}y)O'- | h~*ӿ =(spr_ᷟMRZ܎<ۢL{꾠O>PV}nMGS 3֟?edΊPg}2;ݕp~~}.pX/c/! 
l >>doJmt5_W&qW 8l_%&U9Et\.`":M_`T,kժAխ'twR9 x\ }t m/XL= sr^c%eRI%p/o ƬZU 54iryVfЁUi=KJ:{]z]Bǫdϓls{moޥoEPS7_1+^boi6L'"LC#/uUiܯxBNdGp=&H)K5\X"q13h gÿSXHEIZx*gtPucFU.Vz o'>  =SD[*о8V֯}b;[WB/rkNMzHwgw[_ $[ҙ"I%i||Phs*1uKl0;{ȉR4g/XV8dn;oX|Ȣ.uu5nY\%G*fDi7GTj<{R)%2xiԤwJqTQJudXDciU)$i1rvtQ|Zr!I6p>߼m6q1ls[ff%ؚ>'2}bJ}~]wA?90?yM\IQN)`-yM0&[ SCzi>62Ch̚Z3hױ4i C!fH |sVk/R;^sXeb&G-R)9q6d+>0B&Se*֟N.Q6>0}n =%7vЇ>Up[~Hوuɽ-m'`0i8\-k%NUkEіVveZZ'(,-b4s+ Fo~)vEǸ}V3!Y̙C:<NcʠZ뻲2\4l-KZGKvDŽ3@cĜE NVFZZw9;0]'} $Jig[Lbݶj=@÷މL=z@R^B_l<\ܠ(ƣ8AG:s**xGK^R& ͟]ӒkNxgE&@G,w*5R\NN4罋omSn+m[w|nf^C!8u-"h+vF)ZE(Iij(Ȳcqi/,"pÉNcF1ZleFxQFGicsiX#go9FӸeE/8/uOQ:gy"RHN:^@2%JOj· iȉF!g=Zdڲ Xʁoyߒ-ٝsPɷ<}KU՘ E8YN8HhTWSp(,Z;-9;ܷ ueu'JLo^IigwbLiuZGP6 s'S+ۥMmZY]Yk;'5D=WN^Qy']%xy/D.Gr{Vs?=QQ{<*h>c6ۋPs?~~iD3kh9Ҟk0ikG cSEX˦E)S/_N|``a{r|ѹ&E|[qqAHXݚcAS-DD&p6jnDh:vF&x3\8;9? ۑ=Pk 7m4ץifIF9XWNs`}΂tUP )݁9yO<' kݫ37W ͛??40Ug(j1e;f#ecCFY'" #aii Y@$\.%6!`BLB1 8DPN k ͂UkYó@ag4G3+DI/F ROv'}ź+V}v[eF3!3NhK{i (-ΠXN |R:yD`;dT1|bzڞg~Y^G*#R"%1v2J'"hcK`@v{}h)/R;xZ2ڜ~9=Oj,KM3oߝ_x}.^6ͱ޺ʤ]tpH.oj 7,Y%'xpwl7ןo{o$zx?ZiSw~_=:Uz7du4Zzkn9 v/<̽zu>3qRȃAo$ڙ/_#ɇ.q)+ T(FeoA9 X碉ʺH++vU}6Kk!N:7旅Q$͇"*v.eW j@@j\i3(f2qGk-1Y!qʠk5jLyz8:/^GϿi61|4b:RT> MlB5(<ŐSh 0q| la%kn9}8_߼N_WYd(Ġ'c"1Bҽu!(JBT `g>s]@mUqǫTML_/%\oG%O9 HcZN6RW.8;:yDHWx~G 2LG[I/~ی ȟ1F SRi~wz#Pi]8tlc鬒9.`; 쬚XYXX *?{a @*2eP %YA?w$\1β7 )c$(B-GMYZ9SA( {,op UZݚug{؍-t~&+!?}2X$%| a+WiG+^E3 20̌6;!drhD( Ba3-IؘcZWX(wEׄe*(--&ȶ\51QNZ1ʉI{9"U2Z6FͺsDM`߁?~O9y,i6_:ev AcNdbT`ra))Ʉ,v=$-\UZրLu?LP"J(ِ$sccEm֝J|7gX9yM8>J6z39c6n~g.ݣY>a+o[՝e/Οxչ-wl &_~mj竹;2 fP䢳8[rFK&ւӯ3ER%%Q[sQzHٗlUQJca#26v4$㮶P5jj {TvrZd>Yp~W?]NggnA3L*ڂޓ>dfJ 6YQ[x(}PA@A5feuv+  @_"KbQbj7Zmjj&g%Ne:{SSD $ %[6ltn`UCfd(5{Ѩȱ$Gj-&dT˩=l֝waEl&wE4E-Q,UeDAB"mQhRb:t-EF"(j\;=x)u}c2ZP*; ڙyDA;`E#$l]gYwn=NlJl%%w]]⍵ftT1i 98%8Ձf3lx|b[/yG>&.ޞ~|t8ݯKʎ3?լ9ݯ>6YO}``ENqh4/,Ɲ.9,T]|U*0΄qr6;w'UpRpExyBP-(%ڂ#jd:It)pUZ,j(x DkԤH$s$ХnQʱPhb8L& ?fpnP +cRƀ*j)1_)['ЖҨm""ܷ,]E/EU ޮɫbDQ<@t*}0݁GIAPrfQl-+nFh`lPšI]qbtQ'V{][oyy= r넄17rb@{> l:yRR|(]4.ARR ٥8BŨP%+"!dq`  +^8' 6rRpA"C f)J&^6`DWwX6- 8NOlzg [LxR*+2'I[&9&⢌ E2+PRQ&nǃa]D@z:~kZ%POJ{a`Ȓh]YwnYk2$Mo\SjaWꈈC($"-3BJulM,";AdY0TMnnmB7!TzOfz#,җR:J<+ ]c1Y٪@nqvl4$5BHI; ؊HSDt|ɀ RX8,ߐMRul@:6'GMۄj`|p]/em:wO+/(v쟔\4uBZ.JN8|&y;.r#{&JfE+>oKH 2R*cnK2gD(t>@BFYT"%'g',&j՝[T5b u\.#W싙T"jN"x̪)4}%v}ѐm㈎I~~S1&ouFQZP#F}X* T 1%=H%$*[eX8}Wb0P8n__%qk6p5nj][B};|z~o,mꎯ_W|{~G-7=fwem$IznlG%=jOw=؍fmyJS$-R'x,"KidǓ̊"*󋵳Y6wx礰ͮޥg7on|L#Eswׇ^oM{s۳1o[UD\0q6ňg둣{^Ɵd[}]I4=)oZ}n>ܚbsA#"DGCz,$YZN򙥔А|@O)UȣqW(.#GI;s*Ku^R+yL ֌rr,*K 7+%qD xU䱸,c/^,n*>Dwe$%7p!߷3cU.[[s2O7a{0űAs o?^Ժ G>wDAYA \S&-ۻ[k3s7lwʁR[ e}؍˦x e9-s׍{C^u{aۏu l 'K0̟5)h0^'NRn}4.ZhaA^=``& GDϱsP2ӝye߉|hx/p9㱙`O%__^p'AD ι1Z]cl Tij;x<3xVY&&IYv}%+Xss4*豸+r*KH^b\&ӻʏg.ǔխdċ'Y FOݹڗg܄oe FD]8%2`ʳft&g;cLY;Xl!m_&#yv7>h?xgU^" Ik~K *+#>Xf&CIEE 8o8 IDÍI_}ZQ>sٱ^?:Uϖ/ .O'oGsz6O'ء͍;8%lSQFTMSJ}Oє`p(2I0=6Q3,Zhʨyxp'|G5%$ޓԯV˘>xwxy|ɲKŭ\g{;sE/%Ԡ5>;4j|V8*a{.v9Ule*aGQ`hwK?Jg)4N4a>9(, :]q]!KP‚7ՓrY 𧓞80 !>|tS'SAxLO)ːDsX!X ֈ B`"ZYGq.@.`T&@4 #B>12SRXͬ#u:$+v5]šC'fK(V/ug϶?0 }ݔ?#1h9Q6[.Lcu8oJE~}uI~45?K0nh QUIԅjUJ8N(}\a;j}g\B,KZI. ~>mMգ=?݊ڨY_OZVY%xȷJٶ jlxarNNcs[/ Ҍh_sCb- Dˋm iB i{"-v7B]ˊu􀫞!ֲ$at\`R 0b; D锨ăCx ֪k-kw-TI y<.e4{3_/ O߶?Xv9e&[V"^#~~7T3ArU_kd{΀ssqO a#@cּq* Km`uĮ&H]Ҟ-U([4䉭قkgY߁zWSj`]1Ũ@u\DYjFYN/*h{ p&8@*81,vQN=j%DY;R?NUdv (sa)IʁR. H;/7J)Oh*D:υfB)g2F XM;DQȄQ9PxC kgoM/2^G,Ic^T*bMխ|isӓeE{δmB5oB@LrG<2-\I\$` B<8OKp)t?(|n7['Xn&qHayg%bQ' t'2 LGma9>uר>IhS-M ʛBN;k\cYW  H P A]cHRFXIQ:>/ PF|\&%4mKس5\$VPDln&Ī2?zwf12s9Mɷ ֛)A>; x4=A9hP/ɴEwQ^)uy>Vsy9x~HQZ7e ?~/ffN؝K~JqM5Ns/Qr,g9&Sg?޴m7~}qvKcT;'5ڈ=șA> TcxDQD镒SO`o\`n4vL16_KjqzBc_qrL:v#~:laɝهշk+(۫_Jx~~6TpM 0_%.'/͜ȘӡƏwz]㢆=w#0ݭ9V^nx=b. 
scӾ-7Kٴ冬 TvbRӈneO VtS FTw3,ohCJ3g1c^u2уYp앑NYkXtf$!Nl$w,P}ֻ-ihS+3ztN➓5*F%ğoQ/?|v7ˋ^\g\30>xAE~=X׋oߵuuM9oWsm98oBi@^[/^ʪeU{sK> /Gb${ ,yG9nAKƕ ]lb\H\"pD9pHOl8opx /v|cayƘx}nz%LBNzY;Z 𮿧Vxh6X3 :N˃7ZwdoNJF{wVfγlBA#DO,2·4M펗ȇ sx<>̅5lŧo] cH/RWh"uBd2}1S1cҧfƤOƌ L@J7J3OIhK"`ޝzTA҇*&hj&(1DUB d5Gu9C^1wMu?|BxvssZn曧>=2`;_'hH`IH0e\&+-.`P0 ^j\r&BB-J  0QT$H:_4eh$)EPkgƫ۲,RϮݵy@MOv<<5̻ٙ;Z8[_4urXnYvsm79ծj|X17WS84>XQ0Ax=NY%5cBQ[DIYHQ0W$)#9^ q,tJ`U Jn)`:=cmla|am/5BGS؆wWlV],7 n|qňՓ?Ns=~ /A(p $#cPfg I%]\W?>Pfz٢Z_WƩFr'T(Ԁe Cv +Fzu !S)e !%vߥPKىBH_Rէt'0A tJi&EVL٘L+鹊9;UUd Pj>ɽwѥ i:Z9e&Fxa6F)KgGTűH/C\כJq\c͏<Ѳy /5 tzp]Ad]7RG9@D Ja<@ŢA+L>E)0mp&=#WkCq.>@ۧ0H&dtDE]DR>S}\n'{R[Lɲ&'Rҍ7)'Ak+\yYt,*?NXu#s*TX(`MV٣#`V3gjgSY ^e+d+x˘А\{)pJ#j&(CZVi%OeṊ6w ߡ&{TFC߆%(~^{!-rlOsQs79ϔL6|(H󤿎%Oz˭fյ/Ƃ{;;&h]* 6ZfB"61TҼ,S3Ojgee9qfX0IkPrMJ(2gU@;k/֭(*}|t $\TƬ@JQ+0R8 g N?x"[)&P^wI! KwdLE`l\` i)+LgfK1drV5>OK6 "38 %d$FX(?ぬsVDM{QqbڕCt▇2j;Aa'rH H9+VdX*̔M~H@{k"B]RS* i5Uf4ui f2 M[hQ {h~BLh,q3Ы *f" H)rrJ|^׵9c퐺ԂՀOCGf\)}EwĮI4VDgߜj$&;'j!38БGB\5px=ze#c!HH̬qß۽ T=g%da"7Qz1[yí1HWbKdÌ7$$Y$ r,1Gw1UXUF9pm!PȻ>o vWaWQW$Jd3uퟺ̿!1'v4rq7Lz9E{f;!lhO{̼|+f_fɞ֟_!:?Ö7\XKoxEp,]YƿT]*ތϼ/:; ^U cuEZŻNJ_Ԭ' IU靪}R3[!N .5kϵS}};cw]gJufߣz.(t*'!YdƀQH@QCvLVoT]5|`q>ma}~;9tajxB vÝ/CKm%qo)EXdF%E>$<5LD4*ds u.w@Ǡkb,ی`3Rݤ %$<\*(t,-cAq"(s)w*;j=9])SsBB2F\[R {njz_7lS J #GuHB"J *N]ZD;:g2[t=OO&;A>xR撲{*uFE_no.:q[E_8ÜRZ Ye#%;Nrgs^Ii ALoլC1X?;lPQ80tPjgvC4 Y /pŀW<$pL$,D>-&eYa%f%KMwh GZrY5̥.7p[L!PM)9wJl5 ?; 04Y?ޔ_Pxb~;x)ЏlqZBMYQwR ӟɌw|?}_dʾ,Xf5VJO᨟o@Xz:c~,/k6zxr9&.F׏'ЖmSX@Ңhz'N޸ŶF8O74rOczqNRvăڢz28[MTb+ 0xkAV7}JpK< LtLh'{\VJ]oQF殆TLQ_[jݥt8wN4zFXZ|ϋNKB ^qy]ezzuNf?&o7nj~JkSx|tJΗLf1"q4-h菓֙+82\Ch֡1&FtYN}K:Lޤ%o3m==8]e =d3COB͆gv/6_X@Bg8i%;oˠےvՓ5翿~݂c[b|׺x__W>Y?Eccc֖7)mNQbqs鯳bBEn>R~Fq܄7˓WWm3uWNތ7tyDZb~t˿/* ~8.3o-׻xW/ٓWzȳt>]_c9w>tK.F-q[?lڒu?ݴxڮ,_>2#Ǒ񷵖ruKFq{Ql.HuGi}H־>Q8hx&TL8rj ƒ`4ޯn5hu}cS}z..켯z WB-)C/0B󠏈_>~!Wx,B$h!_c^ 6M lQ/C۵,y-9׿ã"ə3 s_Px_/d\\"\r"j|:B%7=\}pќ\N\9n}+VppET=\}p7Sbp ֝$sVn:oVل 2_L)8b0?F_yj_6H4d7!(H#`p#m~_OφmGY0c܈L(x=J u&`<® w"}ͼyH}{qie.$J3|ł+tw%}\0A1TN6_W.hCPh<410siɐ怉R'K O"ר$jݶSjw O,s[zy`zaөBGpEZpU5/pU67<JYW \ a=+"XѮ\-վQk8u"*輻o)W7p|R]u4;K5irOZ_obnvdߎ8~L9`5 >w+"ͭ V.qWj*sO>귷^rV۫К;U8xs'?Ǘ_9݁6㜩S9^Eפ/P]#jAQ;,כuK^ JGpgpW>gĨ'zBqS_ջ$u~s%] :eI I+i l…b +\Ɉm&/W<%R;J Y>E2KQOO> c{ȹ/5/۵K睟qHTպ7EȬF5BkϻyǤ:/N +Q(U-\ $J>+@,H2kATT\c)o}*~ʡv"[j/dIgۚɈ,)97'8mHN -ˬ@BΧܔx>(4Ux3W{U_ʹ?l+y'O*h ^6RXyQ o4vHt;uzs(1t''$ހЊȕ5YBxpbo52H!FLAr) K699}ށ'W te9wI zmBkAf=!Q ¨mL Tą2"11LĨkd t$ *U"+^%"7BBB:k HJfceAFÂ'W|[5WsT+UO6-%vhzr|%󪾫wX r gG>X'}R[l`펲KJUT~ .X˴[ʢ!"xRԨJHZVEFMRJ9VRDa* "yL6g &5xΕ@62V#adTj)Be, KJߦE38؞bZ~$=?? 
&ӯZ*TR #YOhDd{qY^ja90[a(ƞOADMI2d1벐%3EÈƣbb jWMQ[VFm٣v$,:ia2IN ョK&r#YI d1 Lu5ج!QKI3䒬hk 5)$'HF5_(~5r֨_/+X?6EDUU="ł0(he$Lb:0/W1{2ҷyUDt3$6" )uXFhc$KZH%R,k+#b5rFHS'\vYK6E]u=.NzAr: `wRZ9!HΓΆL3 ̕\6 H\<⡭n4o–-~d7p/9TF1K6p\BяVJY?KGeyz?HRƨ5@6Al1aZijW>r;Jv ] hĶ-!C9ZJT6(*at䨵aJfʹKhQzU:-}B<dҔ,&{-<6Kk>nm]Vm[b៧>i+X.hCPh<410*QRݒ*k*+ZgFvsumlqyf[ڹbظs^(bNΊBfS׊:R{n2zdB&'BEYXp$0) Qࢷߍ${;A7cGݧ&˘0M!`c3+LwH_ \j$dc@YfeM^xFۮvowh#Vڈnxôho$Ž$YvLq 2d.xdކ3 )1_q!4Ve:DPV(cGBI  %ZB9C[ w,Վ:F#m%7][c+32ƩPv}VM"aX1q\aǴ=_Ӣ|cK3Cq@J]0r<-IoϺYRy&⃧i)WxX|l/ B,-3Edy(%3:l,'ȴܐD'A@ H፭ 21 pV*kº&apŠcJf O$A4&+i!u)Є#hC/sZ#HmHoAR*L+#hiiHUC$e0L:?e2)u MЊG0#U#M١Kn2R6+N9.J=>u6ȃ~֙@|01=N [ݠ5p'!?K?z#d'a%ף+ꘘ ~sQT$hr#hoOّ)&^EGaihO?N(`]j㭾kn;0n0OԤܒyRDLp8d'Fp~20x0,_5kfV9}d ǒWMȌŻUn&?}q;ǭttr\GӖ~>ax?.} m^|?ƣW XLiwu5x0~Mi9f~u|ܵl)ͻZwf}lg]tśUubxĒ (NNgc flW\6xi C!^KƺZْTwtՌnFu7,?&[Ԥ,Xthq1Г6.ΦuΘVE'Z֊ɤq!#aEU,8AC~@x: $gpC5įfOGo퇷/?c.񛿾9ic)|Ee&#@?[M?ߴ㵚Mi|nW3Yv+@HZR xɤ] oS(@{uI6ٲ_Jؒi Y-h bʃ6ɠ2%6җv6,i.:vdG9C휍(z1?{FdJ#v阏@XLs3$|Zے$߷dŒeeIvHbTQb$Br{1q4x"[\+H?fgǻ $$$uHZq1OJe  ?ԉtۊ$R[Sm@X~?k0Ҟ^p71gM~Ϲ3?Oe6r4r~0 y,lPM‹/r/e®Ƌۃn]grQ|/֐J`}@ E%$%R +=kL+{[Y]4DZS 1PKwQß3&H|2[:|{!ҵM'dY5ӱ?+N7tñ;,\zvV6P 'cӁ[XF}kܑ($5Jո-?(v8NZ 2&Tu h%.j[: w#]qpŜ2.2RI[\uѐ{RN%'ںch7(Ύ(\]MErqюؑJo-h. DhJd;[[Jə;]џ"=Et/BPD-,3$!2['-N5>p)GOF&ӐSp)"GbvTΈHL45H=Jv@ %(OTɖaklxGKʻ|܄oU-U׫Un.Z+rh3Z_uL_קVXUsrZz}؝-ҩfU!,dwju5 ^y}:A %χ`mdiK#髷T\]ڎ5gSi~St +|jkz^1ֿD;50fZ+rUcmy+hHGfZ&eM K$EE{lH=7.^zl[F:셑nɩP[).HTgiV)ښ9uhZ$As=@H9L9(xc'p`M'ZZ@q#1HN ߖ8Lqy {>q=K>^OVCGSg4՟qGS۠\(N`K$㫻᫂ _UT5?$-Ke:L(t:S&HFDwg,O.݃ @)J.y /:(5( FԨv4v唗Z)}6i&'u<3`(UNC 8mۡvF1t[VKư<Lx@v C{Eň 2ʽt:En#RD "hi88u8-vۚwzvĬUiwTHF f!GVD"6DƓg,I+ƙ4DeǤ",3LG {BQi)]޲&Άz âέLeQ{B"$8d{ḒL !f   Z*e|zyzVeI9MRE s a*RD%$J'OV&Q֨wvRg'-`҃gyPJJi)D.ˉL gԣт)˹Bp{x9D/ xe:rk'tS``<!&1#sA.H|5dt#9x_)r_-+,4O$ F AS\(=Ty1X B RG4Lnj=v6CвvsYW_k"eUdv $3D8i:g9Pk٤gb, Vi {暢Ш$XBIjR*eVXEcb&oMs{jrM-XT's2Mǜf*' Ldm2h$UBǔz5>jO:N*N 9u ["uJ +|HRpr(_M)Bc{csRBמ%fKo wcdL,>~d!Ai@>BeJ͎#,i*Eɂ$V 0"xhr:Y =<5B97*Q çHNWR1QVt2!&{y~s~w"I,l~{TxQy\lůoDb7}uKEq*~^uuWFFRL~$WvR7fo^rޤO!~nϫEݕn .ݍlZ9OxoCF_sGպTqSjw5.JiдrLQ^\}YdU^8У6|'y|]T/נ?13g}o4o,>~|ˬV㎗|]X)KΤ&_}7L~p>{}O:o~_'Ym?MޔI< NzYm{ӛMWwGSxwzRz1|  o 2Yo?>4E>ċy+!dOo3Id< |~ìYG\}O9׈JvݴXt5N*mY0 nk y,y̙+ۛ\3i5gY9H z\ߔf~ЕO叞WyY&gjdǧßP?RڣR.^iP.r(/Q/[1cHO,Bm.ޒz oz_j4~ nY:QssjEmyʰLLy3OI;ۆ)*:%.IEJ6a I! "OB:h(X2j+v6{$RKp-<@t2nvQ$`%˚UP @81'%L+ n ܫH͍{]tĒ#FMɵvhVEd=@Y\@E}  E(`HpP` \ei5wB)@W4* ,' WWC,Urvp|H݇d f:⪃1F;\ cWd΂> Ё\O=z*nyaڧ7Xɠ Xc‹ Ũ5.9N()qβ2r hZJ}vk#\NzUCI5݋7:ۋ5.e? emq`A?;V1c:<*s$”'LxH~p ]%O}h*T3+hHƪ>tp #uVUBY5톮BWTCJ ]%>k`/TGΐ8"Lֆ\BW -<]%QWHWqH*V6t]ZR4SHW)ND*QW*tP`k:{$3+F)QFtnm*R6tutũHՈ`Vc0.tVJI:CTckDW WѺUB+2(ɚoCWgDW}g-U=@ݿ_Q믐 {~+dYk^Gw<8RuøS"K |ܚFz>^# Ν?3^wxC煸Y$?h#l<ѭa@#;~d0CX ߢ}d:@&"2KȈR4cK2 ,)ܟ?y71̫~Zsg!@|3y{7|ZegS{߂/ GX&{F1OCKihn`ġ Sx7 nI)9VҜy 녺zULnˆqŹ'.0K='J;>20R[IJ( g:*?f:t10̇Ѝ+FNz!9&r6ȹQjEw9lVJH?B"oY᤺O2/{5,xYShjGy1:-3 wa'7?  n,~bzOc$ycsj؟]_SM2=xcU<,ݛq?ll;LGd(F cпSn6oʌ;PIpd&-sJ 5-a lE[V!/[j!!|\h-Fg$ʿ?CR`{H1 QftM,hJhl#/doҦ9FѴ?Y*{z}GE+GbQ 1m?-܇ôuc"0)X&oL_TF;,/tG5ڙxeK]}vq2n#Յ$7'G F.lLp}kCZ:bjj*+0W C!fH  jRz\Tknl+Dy70K ? 
_{@<2m zJ f} k@`(mhi /0hM!\&a|5r΂nDԽU?>_տ?uOl< |K~1ПaQ^‹eȽaKNyphldXkmFiHiixLiѪkM{T݆U{UN꠵L+k.t jg>@sWL#,8ZI}Zw[w 0]BIyٳ7?iٯfzm[ۺaoiDaUԻ4 =5/\,+zr0ї+h<3c,2ۢޤzYu.Io{uĶ Sa)X w8p$;}@(2a$fTQzJseWF!-W}(,Dޔ2""N`FQ4`pH ]kip^vָl4~Ҫ+>l5)+;qjnfaVTB]:ٴ;~nR(rh(aroU>A<@_qp49I%;N8y-Xhm  i6`#*%NP*:dJyoYA?8oնG䭱֤jc"ژv󛥁Ț5pcݓA{yI6EnhcW4,K,pYj<׼G1i/A$К |X>8Fָ)PAqo[4Sv!h+Yj)HnX&pWd.Z~yꚁ9Ky_ɻ;|;98aGXcY+0O4"#"jRL]L&ۓ)e;ux[  ztb !vPn=1%|u&c,> З|ά9\F&|~]z6rD>"y6yduF]h:?Jo_|w?|GLǗ۟_8(ip" =o NIk\VҐ2i.I2s(tp> @W=x$^"p_*@ݾp$>Dgn#y*Ht-TGNJa"Pl$ ) 7TH/sEpFMc#{aI.Y{Uq`M_TrCe9%ؘ9\j8 %6DHZ+$fM5IFl23&6γ kw{.Gw{ ^𘕝FXJέL'-q9&@ls-|+x]nLh5MR!x7J'nc l}]aK: ,mP[&d"M&ÿ6.Ǿb:'BSϔA," C8NPQZ+ F{, 9'7K8VGI7/w5fw|3!^ȓ`cD q {!"m\NREtR:.ȾEn*փ金&oPۨf:D1RNpbsc̱l:9Br" $cLK21}/(Y06DJi$T{OO4+c;:!gb1oTӌP$X}OuBr *+qZQSn޵q,ٿBދ]6 `\,Qm+I8AVIŇ4ǀ,jS3SuTwuRLKRRN+/ZDLg9MݴRi,J_b+eIF1#Bn?ݽП\ ڟ*COfew/K"vSZ/IcC1Q 1Ii⛫{^M\e.y w6Yo:@KLtd򷟦] 瓥!nn5CRv7:Jv'MAY_ӣ&MX!7BXnuEsyn`7Z6{>_(]L>''OnƓUNNxe^]S ZϥHU ;}U'+6y1FSdEUta'ZV?__O^g k nUҿ70w&.6 ُt6}cakLmeiOve^Af>~~~W.κ4=]u^to~ld3.!@i]//r \\ 听 Q蕑!Fe2IPL\drzٶ/0j9[ (=QFS !CdJ ^o\c8n C>. Q;\O&R(Ɇͅ(cb",\۹12]TMG"[@lw)SPI&293Md 6G6g˺ON.NüKfzrAxхħ[3x}ۧN^5[[\S-֪%J?XY$bVeN2DP) Yo39ER%FQ}L&Ɍt:HiM( R8Wi4X,ԍPXxXx%QʌDj/Os,-b_63G`3,iqJ19ɾĐtT(И$6^E] ?셬QiUaS@9A };(ɰz8;Φy j7ӎcQ4Fm3`wi<Hހ/3!{ I0B܉P1e|0--ZiCff( {I0d)RGy3E`YSM}OBc CE(D>/\45m Ը΍G)þ1Y/`T8sd: SbOZLVLR1ΉL6"~ՑqqѭkPsX\tqэ8μ&TكPnYyvu(< 1kHx/8Xfe*Znӥ5n%~!5~U*,qzwveGԀjDߞA.A0L mv,aH ֒y AtZ`\"$HJمKD5H^)PdL@K?}3q̿ب;SI%7_N mM[QT#n'=;P%$WlXR %BN.b!TΒ y 7lafYhg'*SQɶxg^`"V+IMR>$϶(Ay!YIQ^!x]Jԁ :Iuc;k&΁v6*nv(*J]SEBv6\hvɒN6jp ^1";٘5l& I#1ٚO1/ )TNa={E T?G;"cʾLC{'A/u`Ml{87&:V{bOz"Q#N ccX).,~,(N~1t5s s7i=?u;I ~vKfU&*rܦ8WG_Gl;;ydFtdRF|wZt)ILqA7 j >:`GtQU0$J s*f xT$oiD9) *{5ȞkRL(,rQqVewvs@dzZр]n`\?cTD :<U4Vcu,J'Pda)5N%\렲lH*%<ߚ}21`%Z# "K"NLHAd) (dt&Đ/$QYBِ7 6Md/|[C6,;45pD1I,!I UD@W*Br(6K۽4,)^1ŻiG!օVHZĶÄ6l`H@$΂hcZ} GEݷ.mեA#^8ͧ/e '7tI(r}Y}?-KƔaH:&֠0&|eGM\>x;=d5h]f[T)g+!' 5}9Z)9-0m!p$dEGSLkvI,#`QšZs@VM6؁~~ }2ZGۥ[q]xȬ)ui5e+=@!Ǒ=;B-ao޼}ـXC7E5HrL S 37F :%D>KQ]jvZIH*c u* :M=]PH`jk~|Yjj>vɪ)ڢ&' Pk?T￶wwϏsU w>;Cnquaûx?Wr>g۾Ct=@gnx^捻1_7=tnu[@՘Qr?oA>tÚOw۹x(lth ]P`":i nS<·k{$&oO/^M.]?oz\hAKvSzg&t !9\zƨI>'(TuσIQ!ZoRD6o*cg91CEKB6S( Ib.dxf8LcH|~P 택-  uݬf<'%Nu[~*/^]Z:J|ZLBDNA=aF-!ESa #" "T]^z`dGrBd|_BhmXL30K 6䨜VB#!mH0E*"ˬD U^ e٠(*ȐTc`h&-1nGJ,ɧW^nn>}Duru75}~o'>??3#25q/R$B=d& qPȩD@ vW TَxfWy3"<5%lBm|i Gy.rr|.L3C])xy FtTg.|:yOdrHLt_aduӯB :]_&\/Z\|8< g\p0ey\˵׃!!]VͶwߋ1U~np Р59'b/Fq:7l7#KJOBm[㱍5/݂Ⰺ4ĩUx@%n8q232lrUzwQ/6oOAf[-=ּ^yw\y?|AUAYO*tӘco2;:lٻU M:|AǟNo(߿~}x|闟mWsz}sl^O&?,;K[Yk<{sxKfo~NԻxOo~>[@O~}}xOmڛ# [j#=96^紩3'DM!_}bRvw 恋+t'olpu~'vl,( t+-}Pwn<5Ew> c\]j֗ %\8伆v~n#zon#ˍw6~PjƻGx]t6mpiںi|vyIͫ'ͮ:L`'X} <'5=v*7@k(t&!ʰA2't\y-LC>E.'tq:rP7N@s]; g<9_e=p\}#+x͓ >cT_8O{7_ J;UIOq*b6$Q\NyˀOވϮnp73ߔ{7; '? ${wN}+ /mߏ+[@W.u7ބ*\3/ģ(]n]ã#zb0mNtme謸]|p.#hҰA0(4DF{ފq)4cMDr|T1&PT1yQHa8IB1M`Y;vޘt}:t⯛Z\tjVW~ mCka$-A7 / FqҬd :!JrAhRU! 0阊'8$;Zˌ %tF\8gP4I"i Qk2!`(aƣi(^Bdi/zBLօr4CS3A8QZ{q` Qc6ՔWnӥ!DE%1 PVR"T$0uc'4\- 5uVW4"坩y0B.8!5 R GV/M$olsx(lT&4M"iT#Ϸ[Ζ ^ӝ.Ԡ֟TIܚMF؜ܛMgywpc}GedztE[#`/G]\+BsW0u4{DWl"ao4Z+C+BHHWAUSqӗt옂@ڈ X=ٻVӧϏ.i0p}?:y[zMSqӘZhAd?dicXZSy{xJ[i'i]oGWz~?Þ;n~Z)PW=C#iQ2D9G;=tʚP L,-yF:(aIịޯS-;F)Q2 -jͤL-~\|T5^O/M^GD_Ί4A `')䰓t4FӬ _yU\}WEDkD?ר 0:LJ꛳F ؟7EUGh{RbKoli;SuޅY5aZ!/ ?|{]-m}2|P լǢI)9-54ZoRIy2H,KL+Qˏ>!Q)Nqi̙ۑj-.8,_`ʊ.#8h\{ɱ$QK-9+RSmrC Hv/+FJ2M'[}Vj4py_le⼫?@D{ ɕ_g̫J孵O5zG9fJn&v3Qۭx<+9sSpE6[l7//_~w,\ Z#)c;ٳm7auTg }yWmiX+2&›LEOͫ`!2⟹tev} xLi!5Qze}md<`5t,)o"9+:ەqCES_Ϡ ) +ub:5`p0 D [JΟRvND$rٓIOʣOOTSNW,EeFގ?$8MZMGab< DZs4b 쐉V=u7WLnF>\2'ˢz?L_m YK3Utm?W! 
(ѥwϬ"K9=li%Wd!Lf70y\pY3l74x)ɁKΫmTV#\m墿^@~fҾmoKJI%lW][@@voX_u@WM/w47r{=l} z 寇~ktͬ`r|A{Zgj~ Uq3]ףFz۹ !ى5s0]}'qԟ=d뺱Uwl{`VQjfKO=k3@cĜEAXJtjYVg)9gIJ>V5Y7F op-_\RttSx79` ,!>)[,&}ݟG#oxh|3?FXt֨mdu{mD^{Uê]?.W^gƀ5j>4W`&F\X.F?i}y;~.h،~6^ ]OsqݠJ} i [y:u=YHz;j~Z#* Ms} -ExrznͲLPʸKԲv+{ƣrT+)E*]n`{լ KNT qqURe; \X#ܺ[:8eB`n)jGs_q,ɳdm".`2K*Tf%eoKnӢ$JђdKk,)QpeM>`ǀ_у<,$9b\q?zNv#V4ݷ<{R)%2\pQ1g&xN.1rbrY )$d\ V1a-8/+l; Ke}sIY|_8֞ Nh!q%~ }&<>tB@)gbYo=ĉz [cR;ĬW逰  nvE%Kʞ'dOlgB9V;TBMgX[iI͎UbxTN},"䥠 O)*HD R*Ey:|D) (R֚ڛ `!C.ZAFHoJ oo ].Yc RtKG7Ǚcmn]q1BXjK ^Ȍ ^`A ƜF*eFn˘d$c_[H3Bz-Eil넌דUpU;Ob)W(bSNڏ5cFe'Y+KXoFL{)c|gƖ!04DP2D]i4%\PzJF84~9.j|kvUV~-/VkTr n,H"u+bLiuZGXmV! j=7r9ǩYKp3"اbLL}HJ8(%:b%0l\#٭-^9{ʜ)`S~K;ʔ>oIW}R;8+Ɣ[fKd~3xG%QAZ%Ҟ%j09yT١rUY˪T8--x(\wY6rz 5=h:ou5%pƾ.x=uaǻ6w,g>y\ lYj\â6g+i c)bb v (]K%O0Ym

.BSbnuIY?DgR!ckvHp;h]P[`nO(_}"`F:zB9*ր 0O`)A3ANWwDpRyk&t:Cq[=My>Jg8Dyhn;KE ~p1͜>ׁN_mQ;)S^]m66IuA]{g*Ʉ#:7Z`b^/G@5q zrˇ? I%?|M}XC<BZ<jk[Yn|xRHW+rSG<3bj5^ܵlݻ9ZͫÏlgNio|}u:z.!\s~tm_f0~j}ᯙ{FW`HoI#oFnlfY޲f[+XWb|'1ëmsn:Q7Z#B2~H0m|jVePƱ8b}:}pKIN;jKyΆA^2;O9'B~}WRӶ{ OEWCx}]zǵq+p VX@݁y'n_Sd Q7IG7Vy6RbkboM2Z UlX ʺH+ *6FHl+?pw11?c3@*w"HWYy~M$N't&tu}q0;玞w9Esk5g v&lGmla;kګ?~PaD"6a8[395Jw)zG={=@Y^e41A kY I&dDRRY8,% q¥oD)3i1r^7|$u'W YcEdʛNE!r{ED/ C7@/ =ϳٴ W>ƟFܳuHwrnT=s?uu|^0Ԋyv@mں BTwǷ/4c*'Q֥L C6 9}O3B̲(S JȼssU!fm懂8OL) V2ŭ5GNk=̬c.xۭ"cկatRd/Uf+eO>VbgvX /_M^ ׅbȗ~P07> #@3`_/}GTDw4ѝV쁒܉ b5I Z BAb-E,D(x[D-C.$GQ5I'?Z)# ϖrdbfjMBbJU`Ѻ(mdžlgyaOlXq(*|kll޿*-Jd>P\jm'GLRMͮ npvu3m{^dG6;R: l; WݿmEƻ;+{r;?\[~zo9}x~{RފzحYo9`M=!vl[yL78NVs/(xHaj:vSw( `WALWJ]wWYsU(h F;ok"R!rk3;Gx=tU)UVA  $/Xm+O ,iG v{l\tɢsZ JنV^ER|$K$i/FGpN ?)t+rv#ʪC)$K/2KY.3#9ܟ|0[suSDvx$'!d'PF,` +eD̿(^r;v=g#G%⭑>{DVdɉZ1HEUo& PrRIA0#}9J@-Dr΃)R7r,HH WYg)g/Fۇ3Uc&\TI+6ɒJ&*g%d=[Cx)+!o^>{LQSUl+nbB_P aTԺb *r }FS juYo'vҭx& XpEkФ ZgrlRRkGRG'/w$f٦I/P)f5bu0F!m-4(C05hY3 hK<"L[M_Su"w>ձ-*('ȓ<Nb䡭!K esѭ"6|/݉-=tt!wݱto z=V9|4mygP̠5d&wi}QwC1TfHJIJÀ@d&0C$A 3K*)]̽]REizM2۫j(_Ի{0pyFGn@JF{U+0#dSFG   < #-߄K@IJf#Xӝ҆T$l`ؤCM2(NcEεЦ=< ߟ0A9xwK"+U_#%ޠ0"tbqT@LDɻdJ)\CB[ Q 3fd˱`;;#kwMW5C+MNVh )Q6TP(Yzzaya} pyI1eE[JkF)DBN u)ÿN_-ذ ޴K/+~:+98g[ /=;1gSsh%%T}~wi hE &:S#L(;:CqsYB䇭P5>ssHV(A& شD+и99d1<2Duhͳź3lVtME BQEHoeВ*&,mMq0n(2k0a~n_^|39\#؋SWYbw{WB*g)jfYb"l}0Al)$)k/Wd|_DИLX-!%(3!Gip(r֎ d%3)iX8&H(\FSNJEHc9{$y9`oGT #tGZ-vLQ4`ˤF7g(.+=+uM:f<"% #Y'pF2[jjlzӍ_^^lT=yn&@bpuƓws7 4#|A* ,A1EnVS5O'*߿_}ԫu5ǟ^bE*Xb)`bc'|I)'cP`1Tw&s0B MhAf] vUknw8S?.xY ų'tM.rP1xpHC:aWRI*VG_j.jNzN7ĞS%oq 0 {jʹMַUm}h2Ӣ47(/YncsB΃>֐7aEF?9/k0ҎyPAǿκ.XJO=&sBf> 5|h8?R  vu/ЬY 7 #\'k|W`׬نEOfGWwj#E{Zbr|`5K;nvvo-emUق=z߇|bx7?koH@`lV`w:4'ǧI# E*ƶR.W~|r┊v@~?if9vn?gyA[pי7E+]eۻS1dh U[5uxe}JsV|6z9ڜ>{N/t>Sry|~7%ԂuY_NX#g6%ޗGfڮ,1l<~dMU3Dmg/7;_~׷yn o6|d Mճmr jn`M}ͻ9"Eu1TU?-?~Dۦ CDm4C䖠m9;yFuܗ/E5lVZȲ`1s['1N*T9Dde%q0L@zpЕ`I{1MR}U) ”Ν)6B)!XQ +NYn~Q I`ecbxw]]}Ԏ՞G^4[VHٴ_v=J*~hEM9д6{zCk^|~oS墹n0Ox}{jv;~3g%Ҿ{} g7(pۣzѳ\P-X5ekBSwf룐o~xX_eymvPoBw隌^?j~'o?|wr=6ߎ.گq|O0zѺrѾQP{#?ֺmbm:Y銀(]!VuEVf] PW&H,]W\tN@"Je+ xHWg+6A.u]!YWC`a+EWDς(˺q銀=+Oi"~x_WDiruG#]#].ۮRuZز+\e}Ewym>\}MF/;7N,73Tma'VO?zl <x5O1s#!&RN`prur&T},jۮcOSѭxy^tܖ1ͭ.OoOЬKTyWխ`K-esJ-$&7լ+ͳ[6k6>K>?PϦl| SBBZoKYJu3;RH BqC0*]qC!R775T^HF"`0ltE^pRQ>ItV0K'B\%DWDQ˺4#]!5 k>6u |ueutEjhUQu5@]Y+vW:EWHO]WDeutEp"ۮBuc vE͛A5+N+ltEMeh!y]YWU0X`+OtEMeh]DA tcJix-* B@ wxID}0᱇l-`/v^GźV̦]'aݾ|w>_NWc4}x:=#{OiSX 1SSt.yhнڍfVEeP̔UԨ5^͹sv9+UڨhQYSLWm2G&sk] 3Qh”PֳN(gTn9c`꿏g}{G =B *ə\kKF"GW }O!GB"#6j8R 3ltEAqj (ɺ (F"GDihH]WDi]ue@tVW U(Ⱥ,* Oij'Z|Q] QW]c]m_IQu5@]9tEAѕsZ+Uwd κz]y&Z]. BMD!*@[Epx|X}#h";}M#m7H]ӄ$]q!6;! R>t$9pIۙ 4Iq=){`cWsLgGQ GFjRAJr]!(S[ Itv؋FW]5(S믝u$2zp EWHL]WDsep!|ᢀltERpjҨ %v7T(Af] PW^$0 >Mkٴ]O )ʕ! k*#]!0(FWkT(}w5H]9;㶓^"ՈVg` cjt;cS_%*a?5Mҕ3mbUX.^\LYKO Ń(`^B&G("jrkrwz)4]\tE6y]%J`A0VltEVq:2@uRX:EWDtDQFf] QW3t ]nh{(S[ Ite33W*EWDk %d] QW8FW lډ.1Ru5@]9r "WZ."ZSQu5@]y H#Z6MH+uA!*cd+* ].."ڐ|tNnԕ^ד#U 8_DiȕLo8pI 3hqH7`{U&zn =MrE]F&E . 
wziEPB] \tRuE+eHW OtEZsтM]WD}u-<']!puF(Z% ue@ e]!nJ\WD W綫!їu]%7CUpJw_3Rm&ƌA~/vLhNc #V%UYCԴ܁Nڄ9G\(naQ!$jX̭S[ϪCQ #s&'h?E~,\v(`8ܾh(}b\5f 5轝<:."ZoSRu5D])o+|tAj."ZSQɺBsKp+3cQpten2tEOtEpZ (u+ B< H>"\#ʶݦ+Lm_Փ sO:}uɪp5]M>"Jڇ+/N;ͧ#+k8J+CUhHWl j6MD ɿ$&ʺYB YN61fb -}CRniKfp`6&2i/S %h)R)]l8h^ns*1SS ӸJLRWWJ΂pƉBTӠV&X=Sl>PN/ȻYRo~ڥQ{98' ´ZOOۯ~=Y^.Wwb>aX@'ߜhw^>l[F#E8oJ ~Y\4^%(uF7eQ*Y炱J7G <}ʫw)S7Uqش*JR ~R'Dcm-39&^&,6~樈TiSz\KK[FB`]nkEz2v򬫧ѕ-b+VƱ+R u5@]i逑Xi>A}J:$Pl8!=']!]!.u]euHpteVV$#]!V|tEMtE`RQu5@]'#]+MѪu\SS;{>mW]9/L(;e] GWc`- ltEMtE&~WDisG!*xS+7.BG"] TWvʒ^K[) HR)iq,gUKH [d %ު9q^7D蹿vc//n4 2"n9nk UD_;."ھ:@Q&@KJ` Ʋ]!GWDiϺR/i.Śizg_ONO)hkF_fћ߼y)Ӥ?̽:mΰP=FjJ }dz[uoÿI헿<9V괹Jr^3ݾ8-b/6tQhgmvheqkU^ӓ=gWk~}O؃7NuKuw۷amnƛR;:;ehQ6wn0>T)Oݗ˄ErE s}˲>LVjp;\OK|R~!3wěO3Lhב=m~}Y~䫦˦Ӌ^¤ Mi~H.U% YiS3𥄩,wt5>6H 3ˇݔES+_d`Fy5%Y)B9:*Ɖ*Vy RXm JY-Nv\PUY-P9PSEY BYe]YUZ4nXK3srzO0wUimc I[uud0@/u ˤGl'@}VEn];8%^wUZ{Z_tԴͥM$sd(!Y*6KP-mQRR(Ovh]Vd'/CS ThF I݊-Šc0ɺhZCtD˷o/TcԚ1$kFH)LE&e`Xh!`N"4=z\)b &3)e%lFZ+ $eʢrQXW= 2Z"){oo2dYZH[iN Ф4IdPQɔ #0oBp+XZ\$gfk+k .Xd!= z]FL o~ws;The+:$dyJ)Qȶj8jm=n ҉YUb9c$pNTehZ\ 2E%d< zs(x4 AJNj(( E?Ҳck{F` Vؔj()A(%!CB}T[2IeSq %Bb>I+u1뜵'Q\2I&l.-6JR`ƨ%gE2uALڳRȦPB@*!+ե$PT":˄wh\ z,XoRN,"@VXT`t[\ ԃshR3oݤiD)Rŀ!WM<@menX"Rs'j4,&↲Nqk 24.T+TRTm.3*p Aon=0EiڜD_9TUFdqzX EW Z#آy,X2؆~n$7S骐XI2"#< l3dBE+h`'I)XPPGBFtT)'L+|yh,*+zZFk(`B]A>`, kNC@zk<  VH(@ Lh44QD&+e}_n*At4XYt0w@]E0*C/pPRH 3Cu*Qkp2ФζZ@*7!SѝF4GQF hҞ%YP dTSlԕM( ? ]Fkd5=))Ewo LcSNUDRh( 2RDāj`#hV6 H۝@VAjP7_&$M'2Й:nH|ޯ_nEńE~ŬQ@q|]1F@TPȝa:I !LDue; lߙxagu=e!R YAӶdz|gt$hKpP 3 dZ@X8pi#ҧ-үJF%J-G @˥{wS yE nn:Y"輚#Pxφm|{̵]D0S4 D8tqu3(ip(t!mL"CɠbjSѭ1~5 UX(m@ zxT2'IB=0 JIJ/CI FVo;^#@.:㽱l,1Hj\C sʨI8I8]Xzꢀ+9b $Ti`ːv "f&5 T}wKBmy7-R:Z(ԒFy /E_ aՒ_!N߾xqftA>0( [7K~g{엾6}R2spnvgg}E^/of&=4i_y|`@z\trƑ^V''Rii>G_VX+Ūi}:oOg/mnn<ڶ3Z׳l6nںX6k'6y-')ɟ8 UX@=ZQZNRAܼ5; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@_'PQ9BC&t!`HCl v=E'Qd$; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@O d@8q+;'U@Rv=E'uF3 N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@O 19nq&׎ǸpC8'0BDl\z ƥ? 
}jkUWz;\( S+L: \p=>~[x28'W6h:6 \pM7&z^1\=Ar:Lp6GW=\:vգ^ (W>>p6hઇkz^[EIˏ|wHz(Qۻ8f~-HT|gW7/o6Y=T|iM"jL^7ճ4̓VbZd5V\S\E@Wu;3Vo\SetzLP/oV{b* EIah!\_Qb>932Qj -!{oRW:DzXX#diWһĠN`K.QGV2Ms!,7n}7NΜh}Es(ǥhbEsj7gw}گE-ܭUQld_ޟV{%눳ǫ_\!]e~B9TvO߿he?-enN~Ƴoܼ޼:õC\?y^^_6Iݬٶqaџ/nvZ7~|>,OON|HE٠ ({\l$!5Z9UC/ۚ0ƨj[NW+ǾnKN}Dh=?w;k6Q*6ˌgWWg|b@%zaIBֺ9܇C\.[Ix=_8x~O-W_>~{jFq\+}y1 Ro~3,>|q:|{w7@ruך },vڷpM(CY$eE,UK.]%5c;;9ksoך{yN~1L/~5闛-˳kg8=Lzv}$.}(r/g&ND\:}wM[ qoۣLSO!Cًfv\o?Zr;Z[Ez{9S5U\^f}}yf-ziهb)t86ꢼ[QD_=l=b2qx-_!)~M"^k)}n6_W nqqvu=yqV6,!W@b:+WzlwuE(ؼXY,?,}k#7Er%Y4Mv\b$&hgF@-<4Gjm`Vw*:H@6@܌#l|wpfyK'W~5,>6jذ4WtE큐k=s) AIOhր ?;K_cnQqҙ&:%$3ϋ`d fBsJ\>>r=EOmM[+{Qɣ킫vdv.vDJk}GI'gm6!Tt[Rl+wt=ؤ!iP:F/͢ Ɉ$4H 4gg&~XT4X_(+B“•Df7oIҜl$Lv;p/hp8<Lg߸:ҧ!Ivg>RdI)2bҼɀWF!.ȡVȬ1`ϣ`2!Eq&&eZf,KҨ$ZQK;GbvkWӎcU{KYtrHe TJ6rn*ƃRi "ج%SEȄ *mek0hD GB:)$)Nv릢?&~&cGzD]##q'%3K's+#HJJBeL+LPp2:(6.rV\L@ҥ4Z FQeXMLi0NqXh*E/xAq d’vF;!dKSi.fP F']մX'pa=ǚwlȽlr#ߝ8OhH_&3kje?5k ל(5V9(g A1r25±(&cMi$A|u'B&Rn'BjٵDȣ؉!~"d(gl^ZprheZeÌIyntsMUd]9zEd]M!'z2m:O Z/h tUgcey-lty7yYF NCtI:i`Q{e$QZۗRk7Ӓ&@u߀1WʚȺ]!뮊~i;`aqKPʙ4}'{WRHkd˾)/=}1wK2NAOΊ:st# A 0P艵kT5O/K1k 7NeOrOFVvTd]8X.m9#ݲP%j1:Zǥ2ۜ|bBHN#a'A:%sHo$\;A~`Imϸ㢔֪zb#O.̩%aM+:`0:oEЊˎi.Q,CC&Ads.3Yc QjZSB0҃S dR'Ihgiy Ixǰvl5qV޴CBI-Xwo 'n~(r?5w|Hpbk0ՐG3.[2 11iI -xFo?s!2I2晈{pRL:Xm/?X颉', FQʗy\T(6['2::[XUf>|'Z{lc9QlzXQtRØt""3 A&anz/{mC".T@,T&(sPhhBAK Ki Tא ) 8 %DgL3{'um"OqgHB?Dbwy%?3p=nB'P p ./15NJo$H/jp3✗V?>lݛN9M (\T,hzc훳?/kgD”1J R_귦[ c9|Qys?;#$R{??w{L?{Ы0QӲ+9PxRh"&8!G&ίGk24]tFn< ]8 #"]#c)T@jY\-[znaC4:ճc3OOs˙]|Jg׾umkxo|bvu}~fYyI >]ES.|kLXIVRcܵj{l~y!ǖ8Onfoa0W?Q>^Οtwv9G\[RcfIq&7tm}}ueyGFi4n&= =sp59k*Y7mmjwF2-TZHXq\xO?:zKm"{{ +ouS}"ϖ *I&ɕv&d8SyF((GUA_d١WEfDx4:i]PG%!0Me@>{O}9"O$)KNM2QhVЉ%ȯ@.R+fn0d:Q PwtRwn)v`1zˉLNN$]qo L B},T-U[ !2`ى>k+h)vէt@ I)BGl)=)Dtr r(яuNmC66IT!Kw]9 oZɠ|!:j)Nt;L.Dh$E&'Z Ѐ4`7uw ~ nEnűGG/k',z: G3Z"DCh5*~23L,T?ـU/z:wCaN4"F%8>/ctNr!,L. c^FV6%Y $W@4KU r,l2#9x'=1cOW@~zQC Gm[a !<4a0-}vtֶ4AwW-67U}//ގYb3b>|G8w;EԲ͵/@heO0='1还  ]_{xzAW)L;f; ]ЕB[v}4_4{;`+wd<1 {>Dǣ|>]Dzx8 wck_^zۖ^h[r7kZa7 Ybs.R+ 55X^K+VXIJ\ߊ;l*'(FץED\)R WֻFl,ר9q.Q剻͐2g[묶Al> :a;T9RG٪^8 utC*fRweBi Ղ%42Wsn/wO<2uUwT/jG5=y|;| ؼuUJFVMA 7:l rRR?{WFd 1v.9K@bLlҚ"5"c7DR-JT+2`[GUUeYqpcV|jx% _\NP@P)jeVb,Re]K=ΤvE 3[M z˺M0Dm9&m2mp9tEǹ J8C%%뤌ܯ(C':ɓ,ܦ,sUM.o-u"|2\p)/K/*Sj'9}Yh~,wbY`gFiG%.9fci\f:l\r/ sλ2.v,xi19s\І qLEZ,wc; Ζvֿq뾤ڋlO Z\*Z]( hax'g)_'x(N\>iGQo˒BDmXLå3+HS(^a`:*uLjx?P"2M^k!KuwySVR% yȁ8:!^ulNnW%f;n R4xERٲV J0/$A@0X[w;"$G:"5>ٱ>oʢXxxJRNQ_Q#lٱ B5+neF ѝy*M yu@ z95,,7 otJp'y{ YȦ^+VObt~2ba ǣ/P嵖D=5=#%u^J>ju^$VXͭ216Z38[$כ˲А~'[F~;k#mFDgpMM>ndže\2ô , cbNS;(Q,hSI3oMFw \&5O73z6R_xE(qjcs|J7mG.~r8#s2jt3Gq3]}%+5gK >>t|*Vy9ˣy^4O~.گy\Dg=-Nȋ(Oo{r]iIwv-E׿ߵ>'ʋwԑ`y[TkpZ1{w=`F>uQڐn1h Ogsq؋ߗ=?ֿ AǶkzLu#wo8[ٞjo?PB +qq^xp%'%Kקb:xu<_竉ӏ3ٜXGUQ M/޿p:Y]{mgN?MiuݏcyԿ^~9ώמ.}8_ ޜϿ}L\z_(9*ѯN˟.y4mDeӟa8ώo <}յְ.pW_=g"Mb=d ­9J;@o<}65|?4RРg']-ipV-6w,m3ug'?O#_JVV=>Z\uDIꚒKcZVi`f(O•]ͣ;{wlp8!-:, 60(ݍbP+Q+0rvn6g=@R//pP{st҆ R3ZFi$0)|r3%\Xá ݐX7x`zT nkڭ.=WU7+GO2y-(2gDdRºy2`Ki\I Xa^2N%R*9T0M\(@E"Ykc!+6L%]gpt_P|ExKw~"δFʞAIplM %7j\aji Lh0Trh | J'ҩ?4ΖIt'srmAq5RʀnΧF/) 38lpؤ)\-։:!:s֟R+Bu7frdsqϋ ـbp.iڢNb:9IY`Fqaܶ{* =ok!%~Ixz N>neA<-MC56M>L&v m!wO # 78Aս;nmFj~m_=B{vb;8{r %O`gq^,叚+=XJ~߁R%U7Ci 2\S!&Au(Ī^pusK5h-KR(UL^-Ө=&ep RF.+(2Ypk%F4hnַ3Ȭe6IP/vV&Y= 0͍E-ۺbޞ`?-u罖mw餙ܙ@YtV7O&X^l~xOT^=])zQǾt%B7JdOї+yDPQJoCb6a(ļ(ܧ GDR;sNa! ݸYL,Yo{8x|\PĽ Z)U!X!E{E eTJPhqWt\`w:;εs,یw%d–ABQȺ-Vb:MV־>-0 >EhW ~טA@f~q>./{<c0ZZQ@z;n7] ]IMwY5tEp u}+Di 8he++l.Z F}+Di "]QK*+QWWWN+BiW HW;׃;=^{7`(B\f+#8Q"{WJ>Bnڻը+B~R !ҕٚ :)tUB^]J $]Y2ޠec e M{3f-hHNSr]K]fck0{{޻c]7zGoIK ŭeM .?n3hvo?f7޵6ndٿ" l z? 
d;A`}#7ˢVY G2Zi` #U@ MX3KX97nFD7ˢVY G5~U߶OGP=~Y;XxXw}<.۷7'<͏iv4Hgm|tzss^Aqz/a?^4y;_޽y ?)R$dQ/aէӶTisM &Lx;Ո+b^ q=dM*8@@ 8r[N:x t:=@<^}.83 B }Z=$_;W=|/'D`h&B7lӣp4{TvQc[vewugQ%E%\0oRelj]^]^1]<\ƾwȗ&@fPҁzWS#8_NNo$x#N8/^؉&}iKo7؛`loz т,ʊ,̓GFmh@B Cs JŌCmTNPxv=49Sa׺ɭV/leH%Oe3Y[ī^`Ӯgs1 ^~_>85kw?U~^SDž ; B=Z6CjaoWpi?[Ac#@9L %۞q Ixo̤it4R&pC*MlV=x̡fOd7ؽZHP${A}oye$Uc|aK ]rѢR!ܺMqPEU~BغYW7v5;Ey[+ӣ/Unj;H 1A8F$qtmt8mK*sj_~nun::سQNh!; C wݨ#nAJc'a7GE~~t N;SQrjs#IJ,֥γyi}hkm#E^TxalOf1`{=/ۋȪbKx0}Ȓd%Kb%' e E/M4Y/h0ErBDa()}JN2J[p3+A0M!dAG2uF)#6 tSFѪ]Usڮ:zrU tKĄSy XN Ӆ)Qur,(B \hGZЊyjka W&5wOj.GNVmi]Z?7a?埪+yg;n,uH-z|ڝJ?qe`"wlȼgŢ99׽3/*BMZU:[2gX[ŲFU8w΢1<%j~[$ j-(b ubD:]VZE n]pEx7oqJ77 b ubDыи^ZE n]pE_ 3V)E%gEv0TC/| lyC67wWATcS.Dj~yBȭr^ Ëf%2ۇMV['cimtm[/?\m6ǫ[v4[B@2\J-#'Z^,+lO^/bq?^DU١EIt.OJKSrs҅ҵF~'/95J7KKrvt):Quhj5hέ*]g}hv3'^*9p~w371/JefHJgЁwl|E6{B[6ID1DmJYvl1),-fs\.|at g"|269bq_e?>ǜ<-gKו/l~]U,/ڇܽߌ@͐O~'?d>de;͐1yɬ&FAIӒ,r3ZSp۬E>XI;,ê#~qԇ3/*_/QaRT=3hbZCD@(YĺqRsjN9Q\tu#p-Y Hu J(j& Mک5YI40y TXbc,R\fy/Ꞝsn+%[hTDlU@ޝjC]iPQ؂2;T2ւ,g)` Eym)1p5^+)l#Γ%8]JjKZL0[XU(C20|oT15ڔចZ4B0vF6R TS/ `}9A5- itJRf4iC63y3VR60383MsI41HmHƾ^9j %n\2Z1-dCI!H\2|RJ8&bqrl( .t$oqR Y(lgjdws@)ʕU:8PkU%o 1Xy'XARq1t2>߁ A:lBxǝ \nNe3(5J`_ lu0canf/!E_QJrY]O1p@ivB:IRJhpR,ٳ$yvչ8R|[,P;C2 SA`@m*Oj^{t ie@yk{DfyK~=|°z9VNre+"1T TRpB-1>3?L|Gu*'Q3 Q \*QeA)(: yGj y nմzuDDK~-y~M3,kʉ_ThRʤ␘ڇtXgn( H^J)mL_K'fNjjH1?uNbhy6¥=6@U22C)c}[j^.7.ʢ0s!ej@V9N_IؐE^R _ڈ*9ΐ}Ž/yw$_魪-xxv?Χ\;'xAx͟LgU$] !nhz̙U|GobȫXb8\=&/-շyU5/|YG>>Q'?6~Hx78f7))73px_ Ɠ `\Ű^HKh3L.,':j(Im"{v~ޭ8+ZåB 4QD쉠X%1{8T߶\yiaqE͡ 6B&$6Q,4 sJ\B-[qHX{Ұ#]5kc!6PM ".K~gKUX*38WS"gi|hVbc0:mqB`dǂG`J%8ΈI+/QJe0:KQ߷7 ]{paAMD%Z(4AIST%pd^Q;jWkMfI8s76`*E5oT Zh'x%~F7>OBx* 2%`(ӤF.`;\S34 /kIk3jӣ:?3ټl|\ xzQ(}-'ÊĞ1pΈ9y)I33.w?dp\ -d[g=Ɓ"kLP)4v11bN0*p# %Lg; ̒; 9CિY-6ŢMmJ"S8Vn2,Q^~,Gq ܉@46:}UԲhT!(ZyG ݧ)_\Pr-J[2pGO%y Tk(1EdqM0I2WQ'"iFn K! Ez+:F&F-SC3AvI %ZShȘ|1ڱ^pfK'wFFZDDF kNEzD"ԿcYU}J,B1{4.`2IoQ&|aVHXvq pKǜ5-kO~ Ue_쭎"P^2pt]tDE뷝H苃"-I@K|zB{E3ό-'wȎ˺mL V\h>?>aкa lQ*UbvQ}'ne 2ĸ4?}]4 [3ipq %bnKޓq$W~vx~x$:嬱XDcdHJ,oFádk3]SWWUWu+idWwXwy.vE.wwқ<+a (p,8T*cm3ŃP,j] l,}U~'n"@Idep:ef Ql&:K٥ry2K3w3fbT$;Nv /߰Y:?ID9efLRieQbnq%멋K+O%{Mc$#e2 aa\Q2.(F,n`jeGEKM'6gCFql.epo + eCh2bˆH-A+n`jwKltlbsJa>bxE r>\\;`*Y˼LlDb<,35E'a䒧#|xfqa+\,(fLKS7V %O_˄LDdN1#D6;}D ._^ J^EoK~޷ζZsʖZ#_ܽxJ'U(B9b~z$BjqC3t򩯓pRlv_h؏ؠBHEx %ETM\jV ^)~ Ͳ@jSg6k/%khԶ&#ದrZ%?ݦpe2AdML&5lVo7 z*vz:r㚭 ;\CBRA ҸLYʢ̣HY*FH) ,6~mʂ#tl˓pXB-re_e&YQ=_ZOl\ZϴkF*Zm ,Y6!&U[_(aSUUCoUޜw!ƹRN0" X>aՔToY .ǜl(jIlfq*6uxg'b#{"I֛4p^39v3s,Tݭ=jRn6z qRٛM`#U7,FK͋P<%V g#MjAQl׽Yz3#vORcYK&3t{5S6)E];L ^ZcRzXH4b37 bNy0X2Z|OmS zy2a1^:.ERM}CZ]?ʲ ĵVj6+ $/[ӂ芰qNt.WTSA4[ӹ1M*)+ƹX*sUsSEFA NziQ_/ [~kBka$\5xCXc>SN{χ䓽|;.bA!f! Zde:lÔjĭpnd:K4dpv٣1i2d !%T.c2t &H!ܸ7\BzwQ+ѠD&8 8Hc)gRne]f?gL}̔{4J7Afz4i1$p [!L ~MZ=_28ײ h{s;t&zw_BK (q mgp a F&^q^1&@ &ZPȀKw.^@f@NIhZãh+Le)& pGI҄џowZg.Io<nz!qG3k, cfY"~sb{9z3Ӟfg6cxVȋ "c XXǢ5&Liz־v˾ >pbΙ&X$DH}.F\#|z.CO4BPn8=4Q_\G%OㅽN^ obyg}6vxf͒Ǜ=V;YJ\~1ST*uwjԥ^bE':1'O }PWIi-"ę11t266I ƒ]$+?Ue]$Afb) s 1`*CTáG#!HA/v $MLa4Xce ,k$JAE:dhZf / ,d*:@ }Kzoؤ01טADǙfB3=#^"O\ԲPtmЇՑS/\wТڣgQ!u<.y0]0…u #Y 2oȿ%ǴDr@98|X"%X TʯTTm"ec& ݱֹ9ޱEJa,f_'l+(+D)PUT*TCV9ci3v䃙vӺIi{Z7mO R %ԫ5-fqpHKj% t!;wZ|i*JNUK9)ey ΁*ۊ]ɵ2AV)^8ҸJ K%yNSe sRQSkU@* !g]ݴkvwY1LE&ub-=(`}b-D*Zc)~Q!*IGUKxz:B0]au-3-@9/1JG^P0#i#F ’n9.,bRNi॥*JQFRHy=׿}! +?ǣ\x(.1NI-Jj痻w??Dӟ[x}O}zfz2,> ƹ89};M{ٛLO:=2u?0~ˏl;4߶p|IEZäooI3_Gar | Qθ&Xɿ8M?PyF*P<-~>˜7 >~=t㣰߾gft|<[G6j>x82Yα_F o;gL]?ɸS8M ޾5L;??l3UWrp2lNz.\%ϯr:}?tH뛽~1M{d?e[GӳoX[?ʹfa=*ο[oSyS1DEBcotè'%۝T'0ϨoYqf!Q'D>LBR$ݽi޽:|^0yT:`{>lt?_/7 1. 
Woۓ?@#ϯ^ ozF t~>;q?&|_ãQ~;Nkoi@lslUp5fA7,/|A QoQUEM,yAjF(sNM_//~rYO!$D9m<{?UZwJ nq330 n Ed[5ZO.a<[*9rFl}{N.Ηv/b}[Vw*q+є^hAg-#5 z߉űtlr7M&w&5TLr<2?/IuF$%j%`""84Ҋ UUJT^Aݔt}ELߕ mr8RKo"Djr/vp$22xLZA iSH8=ԄMHۄMz f^OAZ3!4DD_HAʈG',2k_#w fֺo.}e݋ߧ[p_ْ{fT#r8-մWN(+.\ D:H ElSIp͛YuS3njfMͬfVLLY "G-(eSm ^FG4sļϭ<(Bڛҙ*eb0i(&ȃ\HƐ4F:IU!ȵrӮCxiRǍA Q7ac MXބMXބMX|feO`C]bH ;V$ edo<5JriR:#~*DFƮ1PW( ԛ-#Y̎`B~)s%Կif|s  ϮO/#MGq7KMZ)On)I=.hR(I~!kN p9ҍ.wk+BA!xtNGJi C*XjvC4?̺*J5U,`1Х5uYBלRՈeUELלE\,S3u)/{VncPź۝Y= :y^uMvl8_,u}"'/#HEci)]i}zGcO>~O2{֫}CiQDJͺDm"YsĖ|61Λ)%|L(M’Q>ʸåTbFS>ڜC#hKn$IۈW`r]QT0}ZG H0.e!;tA/Xk6]۪K +g듴Rb$iZ$nT %hVD5NXuZgEڶq )}ZA/4j>4!@t,:6+Di KL$!?_ZkłDͫdm1"$iP "-OjtNjtfqT+sS/ o!$MMϲx6XI cϦ]#¢ c< EBy 5sđgu,ŀ1G4Z YעgA0!+; ԙ@M ԙ@LVF{N%]V2Ob45$FSb|^eH(mܪbTsOTr a&/ ']AMԁM,m5 %+if r)YQtcɍPd %v*s3;"8x*9KͻcjQo8FɄ " VWFϧ]6ueS]6ue89|-7nw`Ymnp+=+@ipFuTnu{2,q2zsu3)JP$`dBe ŀ"א 8Ʉ]+ϛ#` TpIy&UegIJq5scf!>qpBtXnvm+)ٳG&NIn m-0dc??@D񻦁NvmZ8x__+45=d˶퀔ˆrߓp̉43Ӻ m>AMoa7^i]`sK\_q%⤜I_8:_ުBz hFWoPJƳ̒kGi$IrL+LĜ?p_F\DVGO.w^Ǔjiv=K__7gwVm|,Z0W9-LM77˺v._tj2j"$%jx]Kw`PnL"Y"½1n vL>|M 7nVŹf;k f۳V O7IumJ*r}DKsLRme8f+|+jC6oE#]3'x![qH5QZʅ4ǧogf 5]xV^wPO?̋,guУFtZZUEehQ46-ЋӓP6CEz̪$ش0Yzo]FY\ɐ.׺i_&jr'z/$q8)GxYq8wp~W&,M!۫y_\_!2 (b 9':ks~ :*cpΝW[{-maBm޳7F"!lXթv͊BND !@%`4A/a _ ")162gd>#y8泄N6'1֡Oͧ[H|4)(\ʅFk[mdCQQb7K|:FbE$b;҅R0%;/kn8w6_~5?_X3iWAɽ%J 4p^:F XÔkMlJ5z-Yd}ֵΛXtV]RO@93UƼSʫhkj̫y12UƼU ȣԸ)ۘZ«:q  c`Jc+VGPOlf-A/u¨UXMFGrۺHSD]8: n`M~iNjd0 -N\Iy`Ph|Y܊ [ & ٳȁ!* zS0>ҳE6Cf |q\И sAc.h=\.̍y62EvML!ݭK5-f% c^Qd?cdz+`oGvh#uvozfvTr& Yxu9v@<ٮs-j-C6s+Exj-$vClWH( NQ[@#j2+>Xͮc6.2d3K;赖U1BgV:k[Ih"9p:)y-֎ucX1yuXP6һ)8mo#eohc MTBP׫)j=mԽTQ'ɳcέ}-Y}T@2B6O _,ڡ9A0X9%C '<)xyr>vyȃF4AepQ6sntԐҢ1Γ`Q0!+b7˄B _B˳K0G܇0&dIekPQLBx "dLQ^FA9*{*(SQeDvjԆ,t O һ!xsyOG )J^2Oo/y~?ܐ_ǒLq%? ~~=#zՋ!F}m m09O8=@J<놫?ɿ^Pm`4 *D'xfGIIh]8i 0.׾+&NY[Kfw3|`:!ݼdJ$QFQ蒩4)6mNyp_{' }}%h] {/ jܞ.V¨{qo>rnv}`N#U]mwlaF_j:==;W?=oxtǚOG/Ơƚ;suãCvjM4}W̢Rh&|ÿ~_]?~Gf ƻ%Fp(^)iDj ʚڴ/@VGGٍ5;;tÃoHaE{/>{,6phv{D!5/佳Vj:Ɔ@` PEq }rѳk'GyVX\ `IRIuXI^VCMV_C>)mJ)Θ N~[rGP`5c;_Dհְ6|IE2"d 6;hR3x,N>VCV{,J)ew#/034ޟUO|9Fhc9> 6&ANf’ȋdža^l~%%"Bg((:1ZHFr`HNl{,J;L9NhsH;3Wzf6dN7dke#PV6S݋W6=*x7T~8=aEÃO2{snz&kcAػ{ йd F>YY;1NJCm`Cs&܋YtvV_,l\q^c06KڰC#$GCpV~UQnV~#͑la rd%1f#c.py82,mtd]3]D'j N2 `ٍl7=49a/$l6I@-9+wIt;I2HuI$AMR3LI`l0DggeIZ )AMr 3`xYX2=Amжthn2Hu1Hn׍G3/$0Vw6nxT u= [ ʒmkGf|q>H1>#2ON>X#a߄]L7P¶ M}#M$%\{=0RwOleF"$i3CsT䘸[iI!we=r46>0 /KXI4gzx]=SGWkQ"@iΈ2#~U:ֳRˡp|`mגr峙F:J.ƑiW4tF)uPE2iHKͩ"ǧd=W)p 9!L*`AI:\>e4[b0&x۱&hԔ+^e@9&sH|l3e(0Zi:^uiIOfQ k{+UwMC! 
rő]HQ0kE 𮎔N\k;KY}>q:gm2䏗f5ndhT04U-9k%qj(!G¢.$'ФgP(6ӏEfcP{YA%,P3h2R̒&{K*!^%GQ͇ m|W lƆ|t  2FZTB/ kph"lȤtQ٪ v`dY=$f)HgyU)&}ɩ5bjh:!1Rr` Y"\,XBZ"d6:R5H)Xt&Ƒ0#x=&1OB[o[sRU& ( Q4jTA7ޠT5̲W+HFu8)( 9%c4FkD, m1Y AC輡*Ogϲ Wl-:?`B4N Tip󔉝sO[*5զ77,`, qʨuΦ{eͽDh*jS A(Q)zM_bڪd, 'FQD(~OKKg@y_V1ק^,2`P۸u$/W4b5V7B Z]ݭ֯O\֫Oޮ߽z5,O[pZ՗h{_~Ի[n6*z=V葽,h{)^<0`?DzDqA6EK&:E#=P]uIܦIk;_oZdNY1Gi<.7T|:D3Mlc&ruU7Y~RO[Lb欹:&/Z*da!cA:NK[tIؔH6 R$= {O[M7=Bi|EGfӳ~" ݁ќe9 |gOgFa礭Ai_x/"dpl {mF‘+ O57+_dPK^ )t$|ԟ)jGP> g-Ko_/R-P0WkJDq!w1u8>SBG/ҿZ규>_2q3'SH"IJ|{u[۞Qͳlb@bKVQf/E|4FެMޒd/-!{$@յez|,};ynGpNn|KoHqGMo>Ƨ+& X4^W~s_,}Ԭ Jdk\'gwQ7SH82(ƟUHRcL{:RD * zQRRr 5(сlrR]l zg#.6v=HC K>/+bbV%@uI켎Ψ6[N| (5NܼGqv='MTx7{&%|xN?OuY̤kRg2* .,&`BYKb>CXMP㉾$Vw%3IQ|!/ld)̯9V,W8\[Jan]QkOEZ|;/E,.~S)n C%drk:7C#kbW7&0Ft͘ˁ%HBϝ"IdǨ4 j@K)4IB`:O]t:2-aܿ}]hGhZ{2V!WMJK@s\SZ\6"g=xjP=cMR=u Wj0KIW!U0 0J%TPd'!GBXaQG(.4F뀮#y?,SK>h 1wST47nhjTQ5Օ*W-Jq2EC 6JLp2*$&&:S4ʁ ~{ףOlcđ&PCϠ}KJPtX ~LHtO-i}7DACFfgkj"q<*3+bi}e hH$sv!ƍlMw[%&]cN[Ħ|BLBM۟0%(쇅39x4e|)(f,>8r4]<bh>Z}4e\rG<S$ͅSJAХ1Z^'UR`81@|aIBdM!70֋;TġgBt i|&Uݗv@¦%jn\eBҎG/H\Չ䐫Ό.Xj0 Jb>R,Oa=4鸉1OþPq8:rͽg+R6Y.x#OphV:nm{^Q$ ,ٽ̢; a tvm;4ڎ%Q#v;[$7Ÿ~j;~Β 1/5v<<%緻C⤈Y8Q2xEA 3*WR%fyKx\K˝S&9f$)^ %gL,;wxBKx z~wkX T<;ՀWVpԭVcE?RJy2 5&X۵^݂~eCNtBw`Z*:ߧ{}B!R;b#$w+Oş;uҫq9m6ʗ]C7u_Pɭ7Ƨ"Y""6>4@x0ou7vWw,.B#.'֚yz;{m^ |VJ'5 ?% +;30&BigN|쨗.=~Q/Aؾמ]6Ȓx\\ 識ޫ +^9bζ㻳3!-ܞf~^NA~g>24`oK\-X9]B7S&rc@GcDZ_Q&U/W!J$ͼd[j#vOO %hib_ڱs{q7XzЧÿ&1Q FJ m$9Gn=y(9DIm 0WyӺN碙Tm?&gMS j_-g4%rZUwk߭~fݖ bzV EF1SpA 1e@TAA 8G_q3Xܯ[e-t3ٴ7퀭kMhzisf,愈6O1'K'͗/[۽5[۽-vX"B!Y빮8&$yK2, ˬHdZpP4n;Piv4 QYbIc|pQrjw7$\%ᇼ#ϞM>=l6v%{k򕽟>ѭm0YS2DS }FTCKO0\ S}a+.F+3AVH1""BpfC_;OF( =>dcPbAcCH]Var3&dg'ǜCbtÝq \xgX2N|SSMԐq?Q3 MP2t0 H RFz62>a,sPI cUHF,(ǙUŨwY<1\c1ek?|1NLM|MW ط! i@pďYkb;?"l5EG\{`ÿ`k٦1ˤtK:<8ێyn&M:{6𿙯2yW^v֯~\p,rm^BjRcbep2[I eaw5>.cL 4'Qсb-Psx[Ic2ڪFym[um*Df)rF5^6pO5+B=U.f]=*{N/Z /p*t\,}K&%1BN4"P1,I(N v\Z JeSѭ8sx11Uz8!ע>V9l򦸼Ð J<8^k< cU 5i@øJa(lh h0Wd5ͬU}DęjXqϺ@Ȗ#)x=, zW3./ζd|$qͼDW5#)mAo;}*AېSg]!蠮]up'Z84x:gWcp$A7*;+{ \9E=H}D)[SI/!Ĕڇb1ogzpJci5U'` awnMFK)΢4m1f)va~"WS8Ibt@@Fr_ū9V Pи4ZNE_bVT0,ADB/^̺c5?,)T_oXGJe7-LoP3paexlaЭeVm )˥1$ A].ǾKkThݿ1yXHfFn ySRM`Oϻ@ @E=!SǾҞ9ͻʤΌ](0q-=2ӪcMC.gt 3g .L% Yiu2]!{@-qớMT1)=ȮBT\>?Yf3m$ 3 H Q8N$IFT &q)bgB Dae,3_F4E(O2T*K`8cBbrv(l q/1EM S viup0퉌F&y5ߙȾȷ#c%Bػg1D@DqRE"<98E5c&P sơpe@2"d%uo(QE+alCѤ kau[P !ꍰE'pq[Y DC1\mCхI5N8 A$AKcTXr(GfuOj(gb6Z IFF70I ɨۆgo=xV W٭HKȷ產>^oJ6 (_ ҋtk0IN )gq.<JJ !vNE8P̣1( 2]XLŰྒྷ[P[ Ή\B'YVxKlݡm˪C@h9=a sQOf}[T qdIF;MB4ɳQ 4 TXbQ_c湕 ֈNH߳t.}-}盉| _=FeD [r>r_gae="Y;[F ÌOi@p([>;] `ڋwg9ߛAJ}W8Ƈ$+DӶP&\1H%RydRvSk+ztYnM ̈g+1rJbmtvƾ-pKj)M].s.3r!y J#1NRɊXfV`ɾ QiwbhTQRWPփeJYBV2DV}9\nT|{9*@N >@-v`9`SOqzXFvq;ޡI=6 7x φv/&è[W2*!Jg|Wj}E~RD })Kk Cizi"Hx̖Q\Va@t&2ΣQAj{l␪P^c{/t";l!n6 [he9e<߁}ʾxJ.dž~؃cW3ek}c!LW(b/2˨:8)ui3mFz\tT蝬h^=QpG$Q"rg+l2DOJ|\84]j301,]4)RI5s?,JGK-@>_=C܂}A.p[4Ss9X}bv~֚0_9%7!8C˫fߗV h7h%0b*7hŮ6ބ7f"=wRۿkI?t6]6߯&zx5u0MJ}uuK0RQE:*o~׳irM~=M:1Bd4#EpKPQ r!OE* Ec*HA\xWq=Dp?7Պ^ö$ ĭDJOkV=IםRB IC0N .K;sTl+,@Jf٦QEˇT|?j 0`ߗ~WanYŞ#52Mϛ6X%ۅh,i ҧ]cP_[D[;~.g ;OW5ZT؉%~3lg̛.%^fBӻ$~iD2+QdG9b F4M1^[T>%f1NM5SYqܛm(5J]tSscL a6>iХERG0\UJ~lcx°MΓiz}DzM|%;tReSVy,L ]+] L2.y$cנBI E8G'Hbo$c>o$94@.V}xaiwV" @Y FB}>݀)7M>o,( 0^MZBT3aN1mz:f:x* si.Xa}^9cE\Ol }7 'S5`n "ՌȆ 5(K&; gG8ۨ`\DspaWoG3΀ !/LsN]ʹq$\:;]ɴvե㴣A8Bw\%/ϨŔ,aQ~W0uV̒exq[&(?x~$=.Wmp9xaleCf`rBU ۫QZa!_7Ml8CB*ޯD \8,00'm9g33T?>DIƻmǷxz6`?& m1yWIW?C~&g|ZΦi1Jr= ̪Ds%$q[H,oef,N)a'h>AMgg*D%ycDiiXYX,@H3P x h8ʱugQ:yj SLO$Uc`oI&YqE?<(Qz_0/2<}G?xdM{x>=?OLq/(PD?UI;|Ѥԁ'iK_~I/B82(iaR`ok-re0Ȍ#P6W/=#<: x9`M[$0#kx|?KG'Џ1}?5.en\|68ft1ړ<~Y|YBP=XG zNZ3 \f{1Y-(ob=/{.xlOC2RMFX63_|If ?J]xs?!vxYXM(Wtn^q:zuu(-ee9 _JtQ~ޝz>Gu~{:Ez _O@52\^T+ubxtfBY3[y_ْ 
egO0|yld5z,?wu *ɣK5waSynF@N\ѬY*qIvz ^zя}q0: @?phYR-NFʗH yH4tfn\<1&کC n)|u/^oy* ,K<{}~yo;<>v!/Tu8x=?2)̿J.˦\)<(\t?@?UE`BK4Dsv<|ԇ/||zxt2]f 어_\}_ Wkw)ėI/i"5~iw5TJ~ eRRn&C:KCW*|ɜq:3 `ȴODAcz/kwo o? @O``9U8(蟞xKN!K1ǿ lʗVe ɢoϟvwlU;?a bw8VwZ ߫{蹂U }'KTr"y *9=vAY(}QA^9Gf.;9IKk,!,P.,P)g"`KK IXzVtIβ\Th]Z]|^%蒀g9gː0\̫k:iDՓ[-EDtM.[ZM%L(N>+32'' "Q%[m99'uLl.W:2!B,dzgdĘX\ Y>}s{F4hI"M ܶ#mlLTn pHS* iGGm&יa{|U57L3TnrXDMXC7mH3""c-r`U4vV =%0o⯤MؼOF,{R!Y[(I$^Pk3;BlrS/.5eܹls/6'ؽX5^<\/%RS 6%ҽUx9`ꐈ˻sa3E Kr*@~O>QY=~9v /@3Vgų_~N>LBS(3EEN3MYJY5Cge4ÓflIt 8'Mg. |uzՎ8FVgawNGE y*TDr}!/!6r-RD;D*/2-M:\ysWƙ 4#^3E#Թ:\QI" 1 ""GGfBUcb2 .4%\Â#+b̂T"cZ3.0Nh45ϸ;?;:/paupjes$byV&\qj5flu `*Y>S4ZTٚ] iKyr`۵~ NGP-,ͨc|DӁ(%WhPpHCN{^2i%SR(P^F v&K>O>&,f!LDq~7I++(I^]Kd[l\|oAs8|A%2UWs8k̜1p 5vV[|hZ jMڠp`p鴲mLp:t7‰nKWdz.1*er}u /jZ7ο\ׇ[_}ƭe鵋p>ǭ (a+}k5~ԳB_q󹢢r/fMpRQ 9J8GbVDiNbDo-iOEaqdy˝8UŬ 5ZjwmAVb4&aj0h RXdE0ސa#ah.[V.rBD#f)Dkߢ(rn蓥@>@SsMCcpN26^A1AKV!*E).LG‰-KshJ!m+;M%+=쌦'rt#\;iVIʶ)v{i."`(>#JQgKp_fQ+5^6Yrэ']xeېWHG aGҤI¤FY44s8*&j$mQoo|BE ܗȤ3b_$I=VN:.p1x߼x9)3yD7\EOI:CC@˯d䟽>>N8{'"0ۃwro0=+XvM>yu`q}hNA홭0GPapK&> a `ҵEWL[%{UʴH x^v雛6LBY-A냁qED QChVN '[~H8x* UZF3˞3quLr &tH;v`"1"p+gJ(ckp K[=2vcMbCU*=Xo1u]f:(ghqO CrYaTq5@yv_,c JחJ>R*Є(,)pS B SΫTa] ʵq+tcx@2"hm(efu`1FT+yieIN v31 "m˙4S#ѶJJ-H^KH7HRH` pZhVhЖ+NRS&JQ(ɂu"_̥+ًg%8;k(WymճW3^%Jg}ݑ (4fR /W#nB#JЖCJnJkZHIu1WDV751/]_Aղ zvŸjֲývT_b7Ẋ(bH dM5EꎏOz9?a%Nt5sf3Ê}XBBV ڄsZBk)[jpa ٚ&RR!yC(ZJw/!]1&V,vF{؍HASFP)+PKQ{;f}?` [)W]iBdKav54c[T[1eDr@k7 ]+&D*LY;+D(F.AB\͍Qe{ʂ[Iiw(8-F7+^Szyu#DQ=abTkFwgķv2|E(УE3趼m8O>jIhA-(jM;ԗ&bCJ|grSJWX a[@Ɔ-=@E'pCf&՝>()Vw,.uŝ Dka;48?x;pQ'"HD?{Wȍo{{!ŷ-Eyr 3-M_Qݲ`xa)6*+fv-\H3yr[U\ Z2\X7߼{7}\=)gC+dGzpӌ/#m+9P=bpVR )wKF$b%km A2Z#!j }H mx7F y~|h0B4JVصa|D)_< o1\^zm-_n7o1!h.Rc>#w])=+}(ڣoզޅtV1&{݄&P:QOQߪ[ӺA1X=+T䇧~N/DVn._-1\-o]̖f9gRU ίyŻ[zƃ_Zߦ}|1gI+|.m+cj5joU]0X+N8]4}˿c=x͐+&_>P-|6_0Jn_W{h&2-M 3ԽVֈWx];/[rc>xqTNsv?_]yrg0w>?=K0+u}Ӵ賴 Lhܑ/[‘k8YQ+ U[|H-?ӯo E̤H6xK_.CYɠ<+g%AN" $AGq/ɮrF3)!nMNHrѠ]7?xy\=v~x0B FfsxfV֥6v>ެ\lzǛKc.{lwÞuWl o\I4-z@UMhdP3H6&Шw}rMjtykzz3EC&\@0<ދd$H6YؚX52 RZ̈́|'J&++XbBr^5@t#%KrtFJ/SIh:(&e/ι4|4+9c!SbK25#ɐEv_z?1 U.hyºLrN]k^2=&ijxVRکL6gE(4qR VeR&e %g'$ɭ"_2 ѠLCo^m6 Z\zE"2%N DѨ=JMdCdI̝s>mbzt[.R[@ĉ *Rkka=3h=liߒPr`ԤxSD R %9$ǢLf/#ĎwKmDij~\3lKC+RΤjJ=~RqYV)ɶ\D2HPP8nSH5ҳ~zx:2ǧ'u.\3-~j>p;(HT:Oy_,LS(54@ue6Rl*#M - ,R,*4=mDt8;aI"|CW Fߗ2>w"Y@ޭ2;Ŀ%J俟=<3g  .k{/H $lG!I).i]h`YJjg<9f'AsbZڦlY5%|p]R$` rXDNAX-d Ci͆I2#{B[M sj@ _tqymryG ԘPD"d+Ye\!dR_b-'OAb@$L&$y#Tv'fAȡ6v*Gaz{ol)=rDi4-JNERD OGD Cr\U;"9GM.Ǡ;6x V yFt %y =5 CLdݮcB(T>(1XR=05t|{(`3Ҡ#i3ʼnFa*d2ȡ0qH6G.& TV\@ct huw׳R =98tSgwز{,8{vRJ^M"⯴$*;$ۃZe:vR bX9TD6$ `DXoǠ;s&Rڶ4)ФvH1':uYUnww︠\(ӹg:#4#dPQ-uv * lk]e!Ma)z2bh }"9-$ Ք^ Mb҃d"Ljx)ZEY$ٖl2C(NO>cvץA͐zs2+gZ z,)8tRI8fDpޟ//YoNS#\l:Ie2R .Sٔ4O+bJ 2F- kMMp*GR993#&ndȤ̧&q foc$@N6UAHzZ:ТdsJ2bК|z4Aj!IN944P8F+U! 
6'{!ӤܷeUDl刜""7e#2TjwR6J3ê'dq,C3&G'~ ݮ1 -O}ge]GQJ# ԛr"O,,;4LLb2&X+K]ٔwAL1o:hF"&,p=sD|D.-m+Etwa zSOΧG^2Уl]Umn]mѷ{F ě>W*RfK!JCOdL80h&셲A{bF{"F>27=ǰw#sDfn69WXɕ@v *AxW*?N+Ɍ}fQj_R}h^Ī TL+XW}T+2=-AV>H嵔_͓cP,.[Eu TX}":0P\d]Ja.y2-?+vypRGdy)nxU]hPYAo=d<9: 2r$aĺ{mõkfxFJf81BU*,M{ƈ3G/T 76G= ,85d S1x`k1q+^$c/a_׋۳!$vd$/9']j|pQ [;E ZBuK7+{v:W>1LAڗʛ͓t'NHxO?oȃ.N~UoF۰8Y#F=i\ &|N BZ, UpAU9?e3eoQv{Z?YWrKO6&5Ez8jmcMf^7䨡)d0I'4]?o*_34V&ÉݷbӘ&:JW(rlvxP"9.=\#jޏzۻ]|{'0{\Cz=-~'#7sZ9 !kٯ6 ܂.j*ad:4 ('4mNN:]?t=wc'`?e5gt< [Qld{іZ zh˰d_ܡLii(U7s{ޛKe:r>i#iNUǏMcL3Uk^#E)l& pL*@> KL$GJ4g,ID(}̺-,;scW9a؛{exfIWqU]W҈>m(:2YoԮOҀݑٜ\sޯZ'Vv H8őUvNE1go׆gY{F+-0/;.;3;_n 42dg$E)z$UT N:M6ȹ2shv* ˁQS+`ުP.|vOQ.X_,,˪0tVTbd2WDReGT=3zʐZƄ[YN p9<Ѭr Dn77,NVX~58۫Pp s(%5v )[8#{36 L:Qp &wֺr?=YVe5K0x,31i&B!ЬIh)33B 9PRipK(q-0BGc lgFg`r~#5*ES"'.s'߾~~'z7}jLR4>yT( ިցlkǕߨT!3fUH[QVVm?{V=yB!jqn_ƋO3Ɯ4UkҞEu˵}ȗ1#PZ*KEVT ooCR9- +" va*.Zhki!2Mxbr %mG2Z*dz&Owa 1dnx!>]5jĐb'T1t~l o-o/4 Ӑf%ii><lf_&)"Lb*#СM{zUr8n taw,BYu[y>>%{ |vׅ[K܉֤{mJs:`lB:眴r&=S/BzfV}g`NB6ny>i~h (%5G~=mwF:pUG/lQh(Vֵ0phT^&Kg$Ou ?F%3Fv₰h/L^U5N<Z z pj|&a% J<8 !,rȵAe3V˾=>E[6AyZ 1`zX ! ~B(sMyb=5SX(S?@Lw] ^`&QwdqMPH MQȹΘLdz^lԂPkwydǷ[[!N8ci3߿ }r %HG)8C܍y1=,eZmv*)M$)dCQ +˰TQS VY85GڳqD`& x2ђqPRRMm3֮ 2^@(rјq(z;]&! 5:'D}У_k$[ƿdPg6](UqDzsgÄѦirb0&Ga˥ zyG,o ((UnO/.%Ny8`r~n_ (VpnY.v%ֹFakQBv {_Yĸ(\j>YW=gUoYJWsp,ErIȾ5 ^[Nj;Ck[2?-/M#"FF|,Fwq2{_l=9IuɼE%AprR>b$GkU8npr˸Tޭ0naڽLo[/YSJ\4櫢):l&hUuֺxY,*egH=trJ#ԝ!_:rbp]1\S=S:x?2n0uD-Bh*WQs~yOLpT1|<|Oق?mi&fkqz(x錤$,Ղ $227 X](-:)%[TB&̅&O?B ֬[Ѝr` |eR`+@cr0Fpl3 r0KQgyY]@ `Kr6FbM+)-\m֭j\{pa͈1S8 7&FX¦u2%ȃL ɼ` b i5( G`8~{SuP<(X|Yz&L$,GR<)ݏ5)O`#R拚i?lL &"Dy ђEP?Lbtv){`Ջ7@i.Tc|suhfדqz:8 "MdG+O!lW\+mZ-hi0e3Rʊg~<)Ts*xN4^qׯs!SeyN2G;9`HPXzϩsуi3 [Ug7AzӇM\ juM ųB[406X |>8gpoA~Zک_Ѡq<>j}-Rr|+x`DHE'],5b+7"f1S޶d̅>U< 7>l&ru|L/S%҄H7P2N K9ãH.!H|DH5$C h/. 3V: tAU>&"Ay!"񜉄e&8 DJZ's!>4gZ5Dӑ(bA-V1weƣX8Q>wJt:bg^+Eу#ѣWA- AOҽ6}zf }:!10T#@С-ȫPY;A S+-BĐ%e$rȄcwsN{eQ=zRuc7B=vAc OQ=vc/𜊔jU jfq3)BnTnC͑TŻc BKur6*G⃽h "_d˵R` +5kϽ]0kf͞E[B:Rg߂ ᲀתl޿߅ +/V2O?3I#-$u*(B*(l|0 PVq#blJ1iʙ 7 )ψ#&uqڲqn5fs by> $Xa/Le4Ɠ 6<=F!hF!hTL:*DZ8C01iv#F820ASbk,xdYQFtFte܄_Bqo JoU}7[,_Tldgoڇ÷g`WF!qeWF!qeTL\y`zͨȫY=XHކ6`s%ME$! Sh8QXFߙ*Nq0FdM5gC{;H#`UҭK~'BKK~PXO)o1P:]N9 u^T¥L2Ոm4[?zr qlӳ03C xOO}Ď20ghbEB)JbwYx~͑\_SLxbu\Z->rd9e@^pF(UG__WM1G[fӰC) kb*nLxR O-#Y Q$7F>aĵ {1ν(ۅ1ڈ. ]4*E6Oa܆yVdJ2,fx"DAXj(`Il'L!NBR3I01$B$f4.Ԝo"bʌm40haKҊ:dj'l pHZ1:]޵5q#KNȸ_\pgkv.;}ٔ :CRv-&b3á#=$n4*oT&Twm O\4X2IC:v͈ě)ȅr="swxxsT Š%qF g:Oasb(α]Y+W>gTX(B轵-4|*_T|YΒd\nԺlzVLӴ 3 =lN*2€[D0}-CB{b)cA)"7y@Z M0O &ڱ#jݔ:-\ F'-.O6( Z,? ZrP[ F\vRbC }tEyj=Z#д\z.VG7{E^n6^Nc󲧗ɵo_w GIcXbmܕ{s$xl/08Jǣ+3_Z.G,;O 8"L#>ݬ~{7B_ab HNj?y&HJ6^^'z }2s7ƒ h 5Ha22[<$O@"E {+{ G}d̷*Ǹߏ`uQ]w:dPNV׶ ң0>0Ơ!3o>΋3˦yA2[y%AL#G_?93tnf:[zUP'Η\Uىjo6ȸ{?>كϣeo4Wwв9GÅJrJ(2g@jfZև [,XI?ޘůekqO|Y5c}@1$Kvϻkg˗h s'Ba%c-76:-i-oGwOuQ$VT[{DLMvi(')(,\_`G)bXE9)cs(Z#6Qiqы쩀~MQsqw \s;hu:C Ax&BOI a )1\ MΈLhcxnA`mY9>T*0;eE&Q들&#SÎx)(a?^TdJdQ.7OԚƢ'lG_ MHꍻf+ 'xkx ߯F"{8pu:j|0MA[Ǡ5Xwe'ga6,Gc/dN^# Yr`*+L[<9VRM -"V%=o&v5%L=Deՙ֕zѷ{WhO=j϶(FjV9Bk%.arjN@nل8Vw!ɳk<9&"rz<|75OdD\y1 9:~ j^yvƘ`}L-~0z9Q!J1`)ǢϷi8^XG5`$=Ydr3wY_֡K,yanE5-nPYtjM֓Nh]Ryv%X%j،Q'Ve"Z}0S^x(X7h5Y l{ŢC0<|jM ?w<֣nyROBw 3AwaɒlA"󲏗UOkS1!&c5?7/Vxswuf~!D|ş@m MAKNǣ+3_msOF:s;Ưv0'[bBU???~I$$bۙ5aQȓZuӵ3~;q/ӪxI@Mp]廬F&z3*E p|<|)[Y3P!ͮ'<,hG\ |[H}B)z_woa%ߟv- 8oiA NDXq8)wF " Hb6pޏ,'1 [VMYnB5BC7 $5>Vל BA2 !=Z8< BRǂR =EnυY<`ޒ1A[t/* }rnID`GTXUepM}OT^߂ w8}c6lkBьgV |)91#Nf+:޺DK{s[K<Wo߾~;F* ~P'Oc~*<nNB^ȯ[)~a. s"[qJ\KfLsWQc% qS3Ku (s9kFaACSL9|+`ئ@o8)xzǝ&pgC=7=6m<ğauưS ~ <) :IvN8ɑkam$v%j>ssO^J8I'*@KAӸI5pIZIIciONT 3LZTT98\9(H`RT)z3eIA2]Emp5h&ae|K}6uL@ַSoEW/ґ)[*TZ(/mZdT'FkӐ eПUx#!r.J4VUFAեQx18|Myf)>t3è7w^__!**T]Ω?) 
^ζkUq^y;ٖ珽8O"}mGAN[>|_?|\Od8q_Bǫ/n}u{_=}Uuz{g &b :!l RsV ک,5 %q^sm~̓tXR*6Ncp*9L3l7a?#Ƌ.o> zBmmӵ(eC DE32R'»+ew (u !RW!@4F2PP4J##q '`T"0(=35.ds@+ ADʆ0ކdAֱr8(XC I jSR*E_{{YM3=-ܑU8iISVZ 5AKnB[x t11AQ6d&+ÒXwZ(el@}M&\ lk-1$i7. $Pַ~1v߂$Uԯ$m{)#N#촬Fn1/r1~FIɑ Ų6*l_ Yl%m VHZ+B ^!r4)~ :d a iu-[Ei@^R#UbYH؇-HLmQ ߕ~I]AA6.ݳG^0ȶ} EDŽgrPRz Jn슃Ig2fSs@XҰ !:imdAk^+OB#IucܑB|5nCqw@SV_*ϰ*,'3JM\x͛_n#U],bVVg]"x^_|zP1ƏOzGG"N8 5aW7Kmf 0o0,\ *;؄X([պa NǤj^4+ l YG% Y32O*uo~f`db`qb ]a"Vw%^= ;,g(!7if9^~`YHY ,F:p!~$ 46qT7"g)5m jJѕLe3 d%p Poy(/3Ali[euY@ ӭꦲl@'&-FKz4ΙbY)aQ7&Uw 鉊RCn^wn=!8:{aW$w^̡4ҖŁTШ|qK*FokgwS ZXzΙ]( ",J{Y[ `)ʼd"+P3;OG6Ri4~n ;GD~ϻO>}kQ/-EsIbA3@R!wd$J 4}s=f@4< G\v2}ZQL /frYQRdʞ\`%I;aE,K*.Z9}wi>ܐ'.eܐ'ᮆ<uyf<=1c'f:Vg bYz,e38dP>kxbi-Nn-2%RH13 {vsOO(D>=9bh$eKM!eR4L원L4uT9vrΔKkL/JiPi}jQVMs0@ l8f@e Е5 *0UmmzjYbW~cf)G8Zt-jt0_Nu+ B<3d" V{-KV$* \JTRUZ$dEAI"%LnڔJ!&!Q{[ ƔXhTUlJ$@>XJetsR00 AE><V(tɠ,*PcEE\Z[\K$eʩur 1;nidF+YsKdjyec!/Jj)IZY(+q4. 3^aX r5-,`AxUoKQ'>fyv#?[^xyl?m4ոN)M = Um/V8 K$쉫DW'(zy,S#CpZsJZ88DE^z lFّۙUЌeHglbUUV;t_oUnlT&w|޼0Ij}&Mv) BTޱ K {>Ɣ׿{A#1~$Dhn ŀy{>`4hxs7qzY' fH7ǀ bg O‡\0@Ԝq)bKsj~3Tӭ9k t#zFw/T-ZXNu͋.gjN#%%n{㢔\wp-XB&UR|pn'h?4;\H >PA](;v;ڊFS̤mYXcQA. . JW@ R>-IQU=E2\ +jlBxYöI9 .o>m*;uJZxu)o og&Nm v)m8<} C!kXź,Y0qQYPk2f+]bMvՕfυN 0:x!'Xh]6PB\ )= Њ>ng_hZ]>[w)ۨ0{]j.lΡA$H@.]x3diݨʖ;igw]V&G)V%\]BW*sډ^=-HSmV2#Uo*}sWTN0ͦQ[a/E"&=@nM,:f "RNQ]gץ%v'Kf[.ˬpYLl]y42YT2# ȫd, 3 U%emoj5'0Z 0S"2Džա_"&ꌍխOiF1AX1dqsWnE{x=ݐ-N ԉO"X3 96;!ɎxS\"aׂʪ*$Yo+ ]Ӓh*Dr@v,I_ +SB +@ ]*p-(GƋp\P%yᵔ*oYPdh| ME}HNazs%qpJ3O S`p lhLYBQ;Ec Z?r57NלrOG5=qcroa DH 4Z*;CX|X1AL2%Gw\l1\i|𾒜dTWCV9yO)8=Ni VDPG@|\QtqЛh_ʾKu>d6/UBe:vOV).RIɱk#YhZ#qIgLАB)cR~$ݗJ -5yKZ} m9/(Xʼn/,N[qy=v9)Hܭ5OҀ=ԒeLM3)b+#J..8%}\?*|\h`{ v)b@0$`Ū2p \@>zyׇZ}^g>**Jt彴f <Ƌt- j=@ 6%z}MWCR]0u/KsrH&][+9]j+5xyI/ES$FXQ#ekk.Çç,PnU [_WC_ի)oC&'S[=/G^ SENy&za,R\~?@h)*WY}a"#_+DUyJDw@in?Z'c\bfsaaG H=>p{zsېXp&t`)"WI;ڭMJO-lfB{ +3g~$MkHzȉɩ%:F%F}MRɁTeWEFC|I/"ӮČj6c5&Z̙J.B'"[ 2Qvr6=:s5~|fTAg!&<c^E#k2 bF6Pt\ 5fWUzc/$v0nْUA\_Vl4iExm,Y {:J!).1(&H\N&xGI#Ěsח.J|(n}9iE=VR…] !l4MxE*Rp7yʿFcׇ?nqqk(nsz5_>e&2R+c,w4VNa PAT~&Mph+m+YM29r?Ll 86F˝ʌ)"_TnG), e,]}U] ?:p`P/~ȇ4EߎΥp[\`) ;`~y Wޙgpg 5"J5Ma g'nb< Zyn⾶|}'t"B 0_ >o8 >yz.|4/߫P\ QAS[R՗zi(}9 pOX`kR&™JEeT4)8e0-ɓL TJ 82[e_ & C3O6WѕVzHfX1UŌjIS@jG# H 1:TP3‰X4VP >] 4u2~BJ]K*4~HK 4`mEBqm>HcQ# ƨUJ_SrS>L$^6%cB6O)*תx~hݛrF 2`'`y^~IR^Nn]<&3z`1a2z W2M)W7y-0RZD+96zĐaN13,chykt~Oͭםxw ;Kł^wnf:tb`LgtfKoӼBz`B|7/ٳ51%Nw-kOvy@G (||+SFe7.'gp>u߻I/Tz0_i:8>& Ie^X0@ٌQ2+ʐÞegNb'`<M2T~NT/Y9SRXp J (gNɾXaGg  lV9S|9#ˑlΤbINGR_25DTӓhzrޔDlp?~|o|#5+ g.\2.b ܌b ͻˉUW-K lR )ZJpϳS :Ũ+PF94hnNnK<:%dzŹe5S.Th#x'[ZgPsl*]/aQϩԩXj?);Aq[[oi_+>x|Fi=-U(uA Qc2VKi E*sfr\#Lte9NAv~:4{4ԈΊhMX~:?ܿ#Ů UXqp_l@# |p'szWq?"1zlDBaBם#wR47pABЇKp_;S3 <nqyTjդz7;ܫID(C;ZNJ2Y9caњeah&Ѻuo)Qtu:DIؾu;Y0CaRƧV ( }Kbp+d́\@, DBX&즄ݔVv +'(ES@`B9gAh(%#GJ[nZPU ? S +p铣*ŧj07o 7 ɪoL[>,>jƴyzoZ՝GOsHbS^fTq!je'zqs!{bdںjpOleF),2$[DI#V'95 3(ւ Ȑƃ@+Z`;6h=B:SnahgZ*08!>DB"I0o=!)EG r*,pa=ƭ̙epz(KE"aRG : P8>h0J;x4*qN Cr:M )}N&x8vP0~{#MA/,gK@-gߐ\.X]!7ׯ 7LWMKz`d0O >}0\U^gf2i;3J? M6)b2JKHr57?~ m/J`Uf){`+!qFxל+"D޹S0C6m23 =t~VRo2bDpgS8u4! Dahg`0Y:3ՠD>#tyff7Mgfvә♙ET'ÈiA{A&H ~G`eWGd$0dS_3ZP%ϴjGj &^N=UW>x%J-rd$F4"'ⓣ' j):ҁ`Â?I)JJ-Ai.{kR\Ckx8*5V",a]OݪrPaړQ)TtKR`(yffQ2[c~ ^v?@Y?Lq>:H&1!!/~)+v5sq@4i2_#8D"ݫoh{W#RdKd㱜81ɢ9KbTSN^rlD՞W: }=gJOuydHw,Yb?"?-)Ͼz0W}؅jQ6yux^E"JO=i*p,F##A NsL(NH9{uZU[DcO`_\) ;2%ujϙ"r/UBFe. T=wDG1vYfu-YiEk貨pD\y5'GqoLYИXDs\ HVe./TrWI g]6|6,5r>zYëQgM@|aUlsf)wqΐcxKϘ~֌*VYW i՛^}tt;-/y*Qw3Z\U]kMZ}kkRNz$QlyA'fD#2*8 ^-2Az- )h8 F_?0E0kƶ OP.g)yg)yVL`^ BSR.ZXfB_jte Ui 6bh 1 Y:PFiZ8'6F")m%#(2x`\jXψf~k:CQ)]H=$$>U@D|855,264Śy-l\! 
t٨ tlR yO?I?^J|;~6Ó96|X I"bh9ߐwW&+ަWåO"܏DDp9+P/tYܽ8;dB-}f`'ɖo`tt)~ 1KQ@cfˇ/])ĸPkwD1z D(+2 ƂlI#BsEY )cX28OIZ81-))ajG>zb*>]^5@H㗘NXC/5y?zs2jQ\LG_9mdiv9e_ʇĻud7\6IQd1psG`1 Cb$鞞 hc̝v+B(O5A?U$NcϼOw]j  ~ [vrW9EK`z*g䮕@=hg@4uո-c%X?f01}$D&k\q6 ?Ow@^&guX@נkŹ?=}vz8ߞ֘&L"/O{^i TX+&"^B>%8qY|#};'$$ Li A`V y_gc~Tޜg < OZbt6N>INrQzߚQFNw2lM$n]ϳ3e.Mo<|m-͵EI8J;9Lð"Lp9\`,O"T@c ղR*4r ՗ \hڣ6Q*koא+s^_8@WKQWrS-PrE=hj5U8fk Q]FUEFquYՊ擾7qs1j ǣs\5ppUEǘG+jȂ/_* d S|3"ۘ/3d!dKeּpb;ˁh8<ĪyHn>* ̆hDc4|eCJ 9њ4쵕ʮMpU%zj.uym D˚ݾΚn\ɨ+kD8Mz 57Dڣv¸P}!IOq9骸!KKGn͌1Ԙe?~W&,Y+3 WU&8(!͹nLG&o$c%kqyg /׼"2v.׼*;4"zcy.w2.eI"lnub+6^(=Жj|QpYuXBf",E6'­FEX.ms&RZ2fp5Ftuq5ͯ*+1I)-idE$k^Z֍d4/QMca=o=R5@B }lSH8}k֝e4BKp{L[jחFWv{O#%gˤmh핹%j/ZHe*H&.Ɍn#Ha_ob .2w#7E#c#a)]Lԥi: ƥj{RYJX{-*i7xJr]Jt@bIS/c~<Nt;y2{_T֥]n0CR5x.,5 #D NݑbT klY))b(Wf{܃IMgVI*pvR++DTp}jцew(BBv"++Du"|ٕE1(2tS\ڣ6sqBp(1 -CpOV*su`j*yۧs'qQ<5X1"lSU _p\ }og{ '1&F4_pB`;imI,>'8?%ΉW\bEX}u}.*9#Wǽɼ4$i79bLRQg>MU#ASsK]ʼ)k/&͉i9Ƌ\^|Y)YyǏ!0ʼn$w8bR`.u'2pmec*KʻsfE\ 7)$=e[YH k⡇T3t`gh, h\.w=j6Βsñprq'PySL[F2?/ئro hYsPP 58d۝nϣ@&.'ek¶/ ؖk^[7WmG|!c1ڡxc$) E}_:|A#icF7epIּ 2(@^yUUsE7Kwk||wx*}rI[qr'qͪwjtDeUl,r8DX!"7WMgTHyƅl$Qy5̶wJKtںq =H+G[rEә %GY:A6)1jJK8x^;AKI)RFDT)cSeݍ, p8i2wUÊ&!Y [k֪ۭUD4m$\ݿL0@B`l\3L:+蹆))QRRln ]TFQwk*sW9}˓\ v/`ºSXКz@Y$S-69`t%]ŝ]re0AEl@dX-]޺Av5L󘨿g{ݛd 1ۂ]BQ*%& ڻQ;GlrpS/.!QLv %{r^1|Ah uVDAdAeRѨTAʵW4 :w^@#n\ s/ L虰[ >;oʒe%"D3YgYrۥo=RT,('Vn*N;]QP{ehDթ67p5i\9b̨{X d=n'jYD+ǨˇY"3J64Ƨ̥dfH.ٰ nEr/ WdpRg)ɬy})ڨ9viβxsgl2?~ C0jJ̃*/Lv%SĮj(=إۑ{<=Ue-st5ojߎ8+x5U{ '֏oy;Rw\uaURR0l00Qns9$v|cA=8͏Ǐ#?Sl zzڠAµVNtI"ow4}#m;!Z셞w ?IL>Y&A>wclR#'Ћz羛Aq(5GFSN!&2n1Z(BrrEl8WN?i$ NR?~]36sϞ=ׯ^|8~t肝A?cgO~_>ُ<g񳷏|uߏ_Naֹ71n wޛ,{" xG~7L| ߊsn 1*_:?h,B烹O_]yYpd>b#Kg'?{rmaNFJVhNLn(͏эf&n.`N"؞6A;U69k=63'~xJYM,T_]ۥf{훝,,]~|uk~l~76]bӫ~|o P0%O'w 8,ӑ"z+/O}G"UIt32iߛTP)p{Ÿ twS N"=ٔ ^>'s#5Wd6ƇʻruS97!TFhL?'ԏt1)~3;Lܗm'7B hPE v2Jvx#kᆬG<# Fǻ[[zM!wʱaI9WgnKAn 𙰸5<5>E$G`-ImfΌ4HI͆P;%LT9 #|m<8SQ^:X MjI)jjUS5o8*y/~Iޘ^Н;SW\ؕ;3>,^Ftk׆^w筏ݭtQI,Z Mgtfb&1p&#r:%-_?eMZbX/bn 4]tAwW~2;?gfqyf K iEYVĬH[+/ﱪ%\ۼ1̈́,ifЈTL⏫XFCZ;F*Hūu!GE=X"L)g(oPHm1QC7-Px{K*žqTr!R wCpHV"/(hbE"``}`О*{ Oi-XF4Iffk&e3Ut8]9G5At9DGTM1}3;:Qp){1Q 8J'_.Va 'P 䞚 V AtFu۩bлnx+"r _zq9<,?vq9<ábtu9<v.5'T˫jGWgkj@9~Bip$ߤ@ewfٲ݋e'ioܺRk4TnFU[w[Y4 ^Zmw?wӿK=|.e^rώӯ1}[յ_}swŝ t8r d{1-?\e㿗=Hfoj5*E;ҐW6:%kC8W7VNgĬn{4ӕ)W=W.4Utg);{8k]Y˕s0m%4(B}NkI!ZiH+U22F.+ sqUDYzw}N8 \QpRR݂b! XCẻ MtȾ@% "=Lx`2i9ADAuW$s8$ŰeB39zQ!h!kx2ez$ju9]T}dloWkm2"{N]MG@iM fdmHJߴ9΅n[pM{H J^0"*@@cLθ%d_69P4FwQ(/dm'jDJiL,e'D@(4!B~[3{0E%[ޡt+FnW/}4x r $3yeuD>eXմ2ܱ]$k H&Ez< $8EeR?5g*l)4ht1 iq#sR v0 aDR΄7pQ ަnsX.p(PD׈J?f4Gl2:Mʅ?ޟ=_~_'Ut1{;|z>__GW:An(], ]\߬E&>Ӥn0=f>E1)ڲTܾɥVK)nN#+gİvA6-=&nk?ؕHX#$nEs*~?n[r] ŃjxZII|{Z-$:|g/4OqVEFhZ.;|Tk|;I:-#LԉMb.5"c,jkTڤO_@R7Bl#c]^^y CYT"۸e>B{}9m4ZjJZ_C9@aTuc2n :N68]L 72⍄J2TO#hyc!Z *[;D! +1j"h}&˔rAUaf t]xH׈7'J2jMoݨT1н~SNj.^Z`+.mw6%Ӛ'νJ7TpRjDRo#QnFᵓ4Y23U[:L;iَ9T(ғ|L]qq*K?z |(^0VrϙD7 V!Lŏi_ T1+r("@ȡpP D njPEAؙo?5%[[QzBTK2|{@z0^ $ko7P/$tVgytT釩Ku]sI!ݷh<.ڦ :op l059P+WT5많=Vo~=ʙ {<@eĥ]ifݛШz٣Ƞ^0LAe0:7ϯ)t(! 
q$`?4Hx1U5{s!E;@D1{ Ssp9C3qLn$Tq* ;과px3xìFI6R5d_˯%g ] 2qt I|AV釋ӻ evݲxK8ׇ%dwWoOߝ↙J-E,'ICxYz{qW|O>Vhޝ0*&]-t"pg>ƱJ#>P:ˆ$/N x# o,@h-նXtkA+ /DĴS6?{WƱ b@'؈L^D>eIh @#&p|;ho& {>cV~p2HE;=M>;J3\{ o^r0(qKMIԪ;*$'Z׫^B UjT n aѪZ-J w'Y%)e 41XƲT&A7;x d+9^qW?*Iiu$ۓN"MSt#;%Qy@;P)dT2]2V=QykcL!հh("ʐ6%SRGfւBnw5MWs$w+ W]22\CaJoBU dRkfSu\u$[Rs?Os}M*NcĀvws-;6;k'GW~ &T!=/xG]_._1=y; >uzۻ_?mbg8ώ>NΠ.).&Z)kF::;Xp}"pmi[ARQF~vw=ƘҜ0 QW3vsoM+$BZ*jْր<{з$>tB@S _x8>hQD#=` #<⦚Q1p+7bw|g1bb# *T"lR 5ϸwHK>cΣV͸QzCw#?Q(s=sǡś=kngTlȒ3eų5bx;F ðycJe EGJrnH-q <, S7qKĻFDt\&wKy5^H:8ȵ^%.2 /"/kߒT=zZy=*KLyJ&^?3(Kqy+1ȡN}K".QG_;qҫyXc84ڥuWDC]$SG #k^HǶ9k,Ŏ5՝kB;ݹF;\W6SϷfzZ>*wl<eŸVhUz zf1cO<ކ[ߑ1C*qMb)ӎL45L܈sj3a: *` B>^ t#rnS]W~9 !?GzQ &.qS9%G> /4`{xZY@ ڤ9b#sujރiis,}_O<,tn?&]v"sT)+|Ly_`exWllxLZz=WwѶ-a3^$;09A#r#ri1pvt$p-݀+A6l=֍";8?>ˇgc{u!O7KMZL?~ԓ$-5٫s&s>>) Ef+ssc&rϴ{Qp)}2>*|<Ҙ-g'QiVsvTz?gBZ#ءXz؄єL8+e;r y@L@-=HE]7KM7 a)Qóu^x&hSxhx&`nh\Hc㽔@ =ۗ 5}2 !jO>x3V&nbV&7b vSt_6B]4wsv,= -xg_ K6Wjݘ5|u?~]w1FSt4FuܥB{Mn;mX Q{9 pul p4؀07Ӹc%L2~pn;fswi@76?i~ifJ"7MzCBby{siqwϧaVL:朐KT *8/a!fj 0 Y 0%z$s2kTkwVX )0akD-%Ew9(J kȕR~2DGMeN@^tPoea $M+۲0^l;HPKMq`H+Gi͡S0jߪu7MYZB+{_cZW5+Pi5ۏ•8 {*"Ms 휇 y<?c r-W]s6"bT$/3x0Gy sg9ڋԈgBuô^}A1? 9OSj>LP~^FQ28 lM|z<#0"#N/_>8{w>A*̝pZ>y?7pӍGrK]eMHقQ@J EH|OqOn\qtO.'2D+Jj""3RFCXZdpcJyz7~F_7xf5aIe\2r{ݰS @OTzЫ0.r4z$ELqy;vCnU1DJ<-e6*hvCBN\D)Y&/o{31B'ρa.X\*="X\eڎ3c׶ 8M/h<]G;D$QBHRÌS'n4qERlo$=v{ o'Nqم-N5?j/گuc_:u/z//5\IlϤt[j*E4f5qv㿫qt ?P{WB7"]*ڞDV>dT2ejR^(yڞNG\_p)5E[-rI\3]cXMZQPk#s?//saJȵRX }Q雫0bF5xqK?&lb_;θ]dQnNzO̷~#8@f4\-+/ ƃ!"U[xdM;^Dۓ-i9S6㧊\ Q"nlͪ(Yc>t 9u1"Kd#{M#N?jlIJ8R:)KǴY|y؜2[Ɗ`U) wk$YEV6IpOv8'U`"o|ʪ)$L9Ҍxc Zi3$5&9]0Q5Q )KWbn۴LG.(E28 %XRR*ItR\l8RdA &i;j&m6Jč@ jR#$2x8Tpn*2b5Y) )`"*5ai#xJDE(ARKP \' (3e ׸LfA#4h&6|'B^Q(89ȅ[jTX:5DdT%wYkw0Ȅձ"r\-r8nS"CQThacp'YBM W9F 11 ƥcL;v4< ~G7VT!!^hSXN`e\^95=QHSȺܜL(+GQ"i|@Ma3DB8S|'*2 |\SEdH8EXXbhT`]*YR nF dRmĀ5w>L0Jg[v*p?4ywyythr0S6+ v+v nN bЛɟwf4a _n}>6a1V{1)%B!Nۭf7e2םƜ:#hN 9.PaX#ouL%Y~) GAIY0kH:KLιs^Ĉ16bC5%Q?x6|/ڳ՞=^otߖuJ `+P.P(FGwD2HZ9SU薙t4LJ|sbKEހlJ;i 0!89)vKs#rT˗lć78) udaNBIޕ5qῂK┗`Rr%NTQXcFDҖ\ ,Iݞ{pjD}t\ {Η"TvW\'z*ٳŮ5~21(EKVT%+㰥*̮^A,yl`yS$Q.0HMMW^f)W^LaB=T,ZJ;UZ3z{z[m>Ž.k! C\xkP;LAuxI[b$.Gm k{ zB&O(VsbV%)4[ R҆e'yxm>]7;yiv#=Z #.09űFQr&j@1eE%Kq\N\ilWn3mdxZe'^ިӤëwaLIA"]htjkg#u>v1HN( K 1@,QDF@. B3ॢ+M!afAuz-<@Nǻ7=DcHc I@(7a @{; ߓ ɘNoyA=:qn`:,f Jf0'0LAL (*BL"AؼakVn}1 D hRLs/  xL:у>r|J %é(XqR0adai1 @;iy /[`,O"vF;áQo`$1hsc¨HeL\`M5F *0&|w&^ۊދe m֚q'Z5Dahbf1A1lc !'Fw&@Di# 4Xuό|fChTTod$`c`w&E~T ~3| f>ꠙh>/os;P@3 t3ܺY>OӮ '̠T󘩿>ΏÖLmgm{6 :H˭\=cDfOJf6!(&I`, XAB҄aI/\8M`EA%jqT 6Wh6b҅og)XB,X7 {//8,71M?{rV͊qn۶*ƹܡbgVC Un4 8%-sJuʸ2pf>X`yol$D ?zt3 9kct;"d9>}zFY[C1;i-NwQ< 6m0 ϭjv>޽#B}ylKm 4iͭKM ߜh7 sBu3m0+ fM?|^uu_Kz҃x`L蜨`@fXJ1vveGNM!o jw)eϟw5am|՝X,F=e`{Gmt41 ꈆ ֻT, ќ$% &2P#U<`3>U{mmH}NhzoH֎($EF A!CZw4{\^ C+57To=J{r):~$  <ccܝci[] \Ngn=V w6pݚ!GEk\ Lp8;u@0%sc/xrZNkB@1p[yi/ú27Ԭ!8W~c DAz8G.'q&="R+mׯ;͎4Gz|xˆO xYYxk,M}l-7m,g5C]ߩ,,t۟ߙ%\,fwjC]"XqD8evjt6]֣զ =, qP_~/aP~|H[<0:ԝ[EN%ߐ>&/^A^>rj[n+.463Dߥ3w`-YbS oClwlɾڵEkH8.jM4h4GC~FEjV. 
f,5CsZU0Vinl1+3s>)":'ښj2\?.~0f@s`KQƚ4, =3c( :ɗ K3VѷiwDOo}ԬL h (&: C-w;Z>9] 3g]PS FlrB o=oWf!H1  !I@1$r9\\phU9iɡ2vˁQ1q|g73Cwǐ_-;9̅$qQ!ى k#LN*phSE2"HBQRHDY"p0VpĪR1UHQB57a4Np "ADMwV ) l (auB9vCYc#Hs?/ftT<;-fOb+S_9ƹv:l4KyuZ ld{V__У!\w A1{ x UYG^IJF]>X; 1v"FВ;F I$о02&Id(̙@(&SV\$WåfU~a^w Aݟwk#m p(wn {|xr7ʝ.yƀ!gfLt[oCX2 PasHE]QzmhH8i@ []ln7Y2"uj[D}rScdy 5 Mӧ:"B dJһB(ysQBIE€B'L QuTr/.7/<7`,v;h)yh  5P8yW-u`:Qp"yd`ԗ6h}WsY950>AeG"Hޤ֔tv>ҍH?6Lhh7' jk^}?)p+QpIq߂ًb 0Ի'(\I/+f!T0Fpc$o54H'1U|d#pjʓ B0NWԽb†PCl eB}u9&&8n7?)cm du[Yq^˦ Ƅ F2k-6O4`LK2Cɺ JzQ@4 꺤!!%C?bo̭.w/\P򃎸~b}(4F̉qR9o5J掙nBw$.OmWs wL v'zJ P%!jz'f6w+LG#8䨈;f~Sy'm (yB B͏HNwxJ~Ώlu/(9xȔ AӡRf#SC)i}V7nX3p/gPHIO=;c҃蹓; ~S!X'ci3(B8Z3PaN~)( ;M(;9̖;E3i=Sf+@UY)&Mm‰Qr}hZ)|FG_؈Pgo7:6ri󱫔|dՙ 7+bq}\do}١--Hā?~X]pxiI*Qy)H0+݉S@1(;uRD -u}z{QwChx< /=2hifX$  0":w*n,4_^\+N*@P$JTNOM c iަM` i4;'3Dr4bI4)?doLo~2At6]ܨWT~>3/W7)GVWGn43/lV֜Ӛmpul׮S'{%-D@r԰пԨ[H/>DT{*!ri) 6ҸN a0*$4& 0a/$RHWb摓0V &}f,2g29eQ\cҲ]ɨghF?d.%ZWV:?*MP&7hϤ%x bNfB/秉gAQ!=^UJLx%4I7-yc=/P koMuM Htmb')1FŒ"i[@~1>ŋ!NV{r9܄CsW؇^D iNtg/nXU3jkR_{}sz/,;m{qgh?;o{Y·>oߝ^e|m+Oɳn۟WeۛwLc_M5G;akJ>ùG{~"ç]K Ff#,9B{yZx^c89c˗//_8f,mo%PR EDF?^ ,KTc;&8YHmd-&䘊ɻaYlO aP u3tmm/|AZ/ŸNNKeX ~mmWPc|LN1&2ge(&F!Z򥪜V޿JySU0);8Fw+JbObga-6zPcKZܯ]]oAO 3.aR$DSԺP)8āTAvApT @Ɠ 94=pD)\h .-ex!iK,Ĥ*C`8v6'.N+9*qs)`:Ի0U.13Az@쀗z[倗ex!?T-"s"2'MysБlܢ6`9:"s.|a-Iy qմt(.wM )G@MA}9چlqF=YZ7Fon1Q&:tMD720 2\3S UbgdV;I >7}ZQW^^TnR!t(:Bt"*wvotTVqR uV;*3q^+NO1NwTxv}=2 n 6Ė, ];Pokun٫uz5ji{˽;k&w R.PRL9Rآܰ0uk O]XM|=O۶W%!}ٝJpSj6ivc< ,W4c#m^&ՇdNmF*+g\I [,,pܱS uli` f6$6\Ll /~!e*Cp/VA}1DP:}/77KDk|i›|t};,˓)lٟ焷3 %.-|, ŕ %ebe*-K鐑Lً"-Xmy\ 9[נ#wQhV7Bkuxpto~m@WJQeN,˸jŌ5JaqҒέ &A  &A 2/ jX CYR!u92= P6 Pɒ9ke,FIQ412W`=20zu j{"C*D y J [̉ 8׺X)8kZ++yJYW'Wi{=DI]OQGJ0E3FSs1$RJ3 L\a,WDM@8rlK;6@Xz-rET/URV{RM]o]ĶAS`{$gBrùµ+R9x0܄.x0ɱ Ώbu]IϦ؂[U2[pI#-<, `!V l.0Oa#m:5V2t!&1{+mZa^"r)/S\HIB<1boF<2>Qh8!"SW)()CdB~Z ?=K yۓ}ɉF^ Ǘп렙z_Qc+17ЃO Po$e6m? n o[%b$R6 1SFT]Iꀌ5r/ rXtA،<CT^AR"bV0DQ(:UL:)ߋ"sAW=U9y#0Q' RsLLsӆKϜYݔow,WMwGmh=H2 WK,3eWTϴ1Dj󜹒}OuK}%a>B.0l-R’(T/lTufK)%SuY | qe V7m|z`fEzlyo䃚Ueݜ\3P՘ߺ)դOOYy) ydl4 H!}x-Gyxi^{own YٚN,' `V{g V\s={k@PlŶ|1;:@JxnJSX V6;7+u`BSaHr@6<?!.qгKpZ"е LTl̘,튲O1[Q@ASu8cВ#O~0z1M9wݘ WQVߗV$FUGG E16j݁ $v9rzڦn'p7wh)\Ä |cpЌu W<7͝Tͯ›h6M L*4 }T\䥎32XH]Z٣dmfE&ӴhKKs1v!oL$NxΚ<nl+|mpG08>vPncW-k x RN+~5O?U<$E}UZҺp|WwQs_sV?ysS'ej^ɘ|(lM~]c?@0|8{hv\Vc>M?Z~Ow%<іΐ|Hj#Gѳ]6˽w[`,Q>͵:RRvd C.*={r%UrZvQJ2_ dkmbO֏?oNYe-dv:w`:xEW+ڶn\}D4Iׯe+}d5o8'|~WGNM;!$=}8>՟K?~Sޤ~e%y޽}Wj|Jx8οDITPz5կ|l}&ـvoHI?ͯx};rB?^dz0vp8~j));0놾NiON躲vO&_\oO3<4x7ogmYo姟-s1n!!wÉ2NS?kB74?7l, sm^ԢwEUWksHXPT(l7D+}VRV۟’`=csR؁ #;po&m2 J(f#vxPQI'bvZ?;$ѣ5ڲ(ɖ78Bŏ\zUNgXLFEV<\Je1sc#'j.ZôuaYULgՀf5a]RJ{x(M 1.]Egc J LhMբo_Kj W Lzc!ŔD6KtN YE dRX1LDcqYI}hDa&eⲓa-]u v~C|IL<\eݥt2\z2f6Ej!hdۀ̐4VҞJe9JuQh:Gׇ~Hk"#c"DRM1rEqٳG/2N6G#q/e0e ,% \d!EGr`VI 'd(6m}^(u&iD$D\Z/Ⱥ>[jL[!j_HRG/DZ\ɵB'9VQgUȊiJ஖$xVE 02J`JrY%RKI&N F-}C&Xas>|Ks*BU̅ U\h_W>o'ܒTTTTMv'x8ܹ]6uUȝгSx бф*5"LREe違"j&rOhDP$q0 IEBQ>\ Hj5wANY~-MߪU1}bVMӷ r]Np\5lwG!Niy!CU!+)gBe؄u湡M"2K;95Ӓ@fT'ج^6|kИl%f+KI!8lBYaX80{Opȃ 3./I ɐHY5d):o!DPP#g-VC<-DJDKg 4K͒g>pFE >f4XD 0X a c{qu/_{mvayixZQVZ|gZ=f֍|u/ L0b׌WqJ{.'F3n8?v[p-]_3>ɯ`j< t`4^loqbǍRrXkKWjp{F׷U=;Вf=  gs ֍.֨ VǔKw))51.qui5H@HXǞX(!4U>s)68P8Ȇ!V{ -F=T\n8 KT["9BueG}GA=QF;t[} +]YҼGcWihd> qk^zEڧBH6V[VI3YQ *259Op[#E( BQr]^9(0RdǺksa TJ 5W^kH=s<6 hG*h" ||xTpDV9zܸz^h._hS6uqlv8e:/Wi.(+Ow܃!l/'f\H#TGϖDI0r }:f\ዪ9)  >V  E6?x۰',ِD |}-,+fG 8=E(:jK܋& #Ъ8+ ˷ZqV2kZǽԦ2cAokY^ݭZ)9Qk,/pjVg2xבl·_*RSsbYiA=gEO|QgeYF'ۖlp7KM1fKw8%[tI,uۛLdQU,rM6=Ξ>tdNkk9CQA`:`M3\2ʩ_g,+)_܎2u"0NLJc[! 
gsnEi@%PzZj-d6h)' I5WҾ -.YfV[/} ŲɩnkShkZ3c̝AݔPd@BZ{[ogqL472yp/RW47 NU.V"qD͉mr )S)J“Xqh j ث\ F!rYB=T]hk~fsY BA;c, B%l6,1R*+YmX'S=J{MJ?/nUr/[t*X{ qƱF![Jq["W!IdDd9Gl)'T/7 &4OA!7mVC]24Hvdk^UhkI+#PmV^/rKd"aё2SYV66ZВ̸LYtuFĄhJ_U.Wa#.Mncѕ.[sѭغ,By|Ewf PTp򘢳ln%o\^nzrCRcS[CSsw(BW9u%ʓY 3!3C-/::Moy T.s!@<fciގp 2$㬹YX:w^ɹ+9io7A ߖ>&&pΑ ˲ACЁ A& 39'PVK1 +i~,~^JDW]nmWSb{ pXroeNsR->ei^ fsžt]ymT»c!yTQ0  dEHm0,1lh#{yQE,',Nu @9$x)jsvȽg^GTɠf;h-'FNR$ L̐4i76HaK"'| 9akbD2)!GL]w?\8xhrK@]{B'{\ .xZ ݍC*|rir숯{㶑J/,~Y`X,v:Od]rI݉맨+[""ŋ-?8N7%SN ӎC"p3ΔJH#91) 48Rذ1G*6q5z척[%( *w߆79s%+R Pkxvw[1rӋlXF_3y+ bh0oĸ QAq ffcaBҼqZ#aSuën?\mżsD<#B2qAO0x,R*?B\'MϏA_;JlH^SRYZ+âZ񾶎Y<&z1W*|P`J pcT ;fhѠ]ɻ5E$__]\h`uu1,`<=ܺbΗ*fi h4OG匡vr/H c0LlnhkG-ŲF{o7.>P^O,܂u L><dc1to{o7p[ooϟgU l?3k:vOC 2ADD_=[ Һ2wt6fs xGzʿyk^fʼOןݏ8t!x ^u;y Cs/-=g~Al~Z+)w/v2\IYӽX.%:t(#1c%%Nd }/J)(k'إvrF| mO5nT~PGgTp8 ggY4L Fu+[]۟IгbJǵ]h3lǗC:$q)Iɢjz=\˴h1YwYLî+^pGE7h= }EÃ#,@71 S6 N oAxH$q5|Ϯ @ YĶ!FƜQ5y=8e"M ȡ"A馛2ߑb3c)$hd`E$2MDF*EC+E ``@ v 8`B:%eq9)bIGY 01DH/DMKo߻33 +E9yϿ''eWWWrmoB;#zR== ʩyYL'ػlw[GL`i>-XmN[e?5ZN # wfBIEO^E.0%%\،w,k*A:@NEZv`pk{b脰sv%̜ac~^9 /}c,'Gܽɏ ֻEH~ cH\51>b;ŲhT_L֝y{h凛&}pSqy\Ƀto^|^}^۽;`yǯ,BKp5˙|zך5a nci: !'D"qs+ 26x>/lMUWRыKSfHjE RX$Et%S(iyEKu2הOu$`L#Yܙ8K۝2il5Jrͦ $[gCQO qR </(B:Ax!(¸zƼO,r2 RQ1A 1bBoJ88i)9丛6y VLЀ΂̬ ׊HfV0R ?[xK/# %3f2$ [km" v4e2˃v2s 60 6ia6`_rͨREz0 !!xjhkiw&dB*whڸCDwuv${|\OtU7s<96r+0vZs"`b<Z`&(JrwP @jNm|>z 4tθzY{W#ldvΞ/}nw!~B^Cȭ!wfwg㡐Gl s4I׏YHO[ I?b3_}dcYL(1b$zmxgoP<&gW ޢwv.>Z;ƻ9I.rVT뜕1D2T KDZk Pa=K[dg_=f ltST1*b OQbWQS 9Yg]I_gvoI旻h~h^W7_u֊= J {cf+ew܎1c^W;vMc$~.|-ONd]n(@茶pfttʴ\!1i}*}_ܷ@:& JT=VzOCP Jʞݯ/y2!f!GR!RsXb_~CBDpkǿ}W.F΃Ƈ4ll=J?1QQz舨S1D=Ý+^oţS[q^}:vU_ ?ڻ]"C߇7ӱ B!csuBaޯ1PHb㢰Vꓴ^HDƥ3[է-78Ҏ͒آfX(.P?7*& I@hTLMtgT% 3*&uqh h(0Hcf_,TzA>t2L:-ҧ;R}>\:{<$؁);1/3^f*]| /.e1o5Bƅ]{t  d{B{ㅜZ(==1Pۧwi;_þ1P[1Q:uD; "Mp%]kZlSLKG]`ȸ>^ퟱ{ 0JƸ^E~?r/' G*:ywN?y#5U(4 Y$H`-RD2hhL)I Zbڕr(l08Y<] Vm ^a`u00Y8:2ij1a#KRb0)5ƨv:X Z2G08ֺs !xߝ[ofcuTgCG~pBs-eB+̍5I F k@(w€! /4KΧCmt Š3iyZ"S^@Rܞ GE*#߭j(d"@i<1<D0 4v&*N%XJR\[)43n$&I<G@2;]R>56wJç)d` :]=Gn [$n,vou:d,YY ;O!h Çu:Z;O^5;j؋q| F0f96g;`FZrd91d1~ ,'C: s Jh)\t@Kw:B݌vL!w .U~Ax(BTIwPDGqYt%,dZ:_l`JJ86rYX(  dHh\8B33ϙ^p 9(ֹsv>2o7?槬yGo?l2ZnqqnQ7wY $*B;R:9JzhuǛ _9Tw,O +Aeyx7u<Q濚~͝ǹk&J[k!0 9O$$\Y)֡Zc=޲1EGK$쏛ZYWIpV]2sݓE"/y4)`? U3+4Yͣi޵Qoil`dl&_ )ƇwL>deW/r-v~}&&o7ٻu;1eE|~u/˻~usDuDPm]DؽHx_*%ԩX_v(\fD0M+]wM,ΛZ6SyZ,  trB#@~U@b_R1W; Q1WGC)޵s=a!Q|mqwVL+Ekp,19ZrsBapD89Q<3OuВD:D6 dN& +PP.% C| \5k 6$ 6hp-f)Z#|^RjGDrfziuҋ<`K/RSd '- 2:tL^.X 翖wEL;Lc Xb̓6E$s.Rr55.8vUd^8 r2ks Yx9 p5._*jElaS:*-;f]THss~ji//Lcx:IG\CC½f 1uqp>PBWZlt Ä.LB1ޜ8B ?E1WW= @ N|^RH@-TfR&i*I^'i seO/̧b2וNbeR/ E|xDŽ(N 0C<<)(9׎{(>ctKN{΂dNY7DgFʑL.㢧Q?'ٝya֟ʙۜ!E"AKR H"bH?gc ab#c1lcs;gxPط~a'd<&cL4G8{jy)?> gF3 *~l@sSk}iFS<ftFV][o#+^r$-Z$ af`;'/YĖ< =EIVKVwb m]ꯊu#YGWL*- e%lVu5ΉMÈ0IH& HJ3F#F8Er,U*oWK=^6OȒ-qF7m=_sɱdhY߁wG'TMWR/G ?nG9dizUʁ>-B !t2.JY3Q&8~^GǤ W7v#Rۇi1_/_P\;Ze21*sYtB?vI,>˜ǔ:kkk)x^QpҲh+AH2JM.w;ir}zޒ¦;5P jIׇv=f'Oޔ0!y(r=_-v9T;!.W9=ӽ0gT]?=3_zF 'qOۈ_A$Yi赒?}x2]8nn~rjٿ!pr];w ~|2"yO^R\ZS͸CA(SEK^u(e3VUU=S[ (E59l۾hp5ekr=X)tKNp=`!5gN|ԡ$ޟCD @uZ=NDP&{?6I?=}_Z쩜ZT\!DZ,(,iM zĂ{GHp)Z0ԘL\g:H- &-vz}?jg7Cw,>2b*><0ASZ(DR [;W,KH%{1 .p)H7S3Q|3sq29w2ejz}^_חŦ׋xx!,$͙P˩3$׹FVtJR>"e<_fȟ>ǨWU?nNt2[=AƯӇq8 U9{&m8 CɷbVh! G| e2̗0_ 36hU`N\C#A E %DDߢ:xrQ]vNLvN A=A$V}hoj5ݘ#QeZסQR|i] smvT#]NQ^rD,Q'Ow/ɲ]jJ } cUϔ3N/Jcet{ f|T^I, )a@h"F0`yu8IQeybŃㅎ t DN+>x.jD i &IBAtwE;rϙR:/K,b: C^Si(;1$zh9 C-!. 
1#ilÕf| WTP#EHYXX2!Iuw6dHLɵގ0aJ˘ Fyo9%j@4ર ځ1&>ɸFłh*ʍ0R*7ߑvYEKŀ(E*+@ƍ*d@"3țVh(z=[2$'m7 o*K^\6ZyJP;i,g67ZyFK0t/{;ՉdL3J87R)|lu@H@F;Z)s9a*pA`xP[ Wqz8 )Yhۛ[5ɕ^L33 oT ab\d8])܉^YY:(zˆbWF))\`dH F 1MPCǯpߡ{X㚤!09Q4KA籆5[ZCBrk8/խ!0#t5dhiRRTs xl )zFPY%jT/(mk)&US=\IȞFwydҗ0YnM^l5~fW'vaZDϛdLhw݌zl?\l?~Ao/S{WaZ?9?xP,RM6.Ϲ}LLA/c2^z aR7lF,2{z- nSY.?IʄE se5ԇd'HC1J_f6n{w.y*ْY8 \N)_ ]"USq-:%p5B#Ae_+d/\x!Jک0,HnE&41u2No^b2c\nDID#i!M[eҚԝ-M9K.W$V 0QeI)!gL/LQ 9/ 0fu6 (ns'P*5sE ZthW FgD49޸ndFjJ+y#_SCeF%okp`յKogzSEBV`z-qO]kY$hrk'f32ϕ[hy~#ۓΈ*T:f:[,ьNcSjsQS.oLOIXŎ)Ԗ3;) \D;Zpy⋽DQb'BkX%`P6B-d7!.zӢhqQHZ#N&MeCaTJfګ 9)sNmS:@Kb䡘`ZuNJV6L1)4b"cqJoX("̀ԊpyԊc's 1cw_'a^iok fnYҩ˳2sYcEC44d/\)+,*s.:U갆v}e<z(6W2/F=zfp"w̌߆4Cc1W1#]0 $cwgW?VMk8zF q&]:w$k$V*@:F\7q %PJ|eGn@"xCр*\Vked%Zfʔ{9: yg#́I3!U!: XœC5[3J ʂr2AnTbRtqHAWgN:|)kPZ,~JlF^Rym+چ g*6 \Mm(u*-5_oihvt'R8k|Pr[Xr휰8x5gD'NjN]0E[nZr61[зơK2FLuK/JJ.T)gNⅤlg+thu*cH)4 -iG/BԦxG6ALZ]޷f)\FwLwV唔4JV7RapnߓUPQ(E`}__Tԡ\J0N!q* I^8+ 'f_ F7]QP種b(#-=krU~PR.9Aɘ.c_%Y1Y~)Z8>br30RRaon|_$I/FwQy}4hI#c\"%BpKɃ*m`>\>H"O>bUNkay u `F,tF0ıh D 7SV}2kQ  D1h˙āb[>jؓhNhhn^R)dZ*(R`q]$I1fWXjG8<"A0ǁRF>bA> V- Z\evM)bz00J~짮y놟U|z>~Ŕ@by-y{5Mx0]xlwp$0|՟k| ?f{_pgӟBӟ=3s>hӟaADڦ,nFgXdHbq.S.p)UQŸ}4-pcv:O:TϘcQS ދV_-5lq5Ya~(_C N[3 Fs#ݒܤ3q()E]5?.iye(fӆS JJotƐLf2%zU4H9 h_5gd<n<./.0j,[T oFwoLw`{ rҹ9ݠii~ 3FOfE V=#\)ݻ#>b9M sg>S9#h3donzK,T.㢮vw\QKZ@t{n0;pDvqtc{ۅ/<8zj}{J}Hi8g^*ZK۪74=@ÁJtX6X~F#j+PCUM є% L N2%s#W"b-r0zuT[aCBjӌiв^' lZ[[(?oI>o< KIwEGk;߮aBfO$\rUI>L^I.`r5=uqp>M'oV0C*L9< X В綂SB}4uBd$`4juA(vyP6%w>KlO'!&"6rl?5tk6#^`Ak%8gR+^# _S 12%[9%8Ҫհ[1euq{(N& RѬ;-p}㿺 \A c=K9$U" cHGpC ̋Q"2KHHt͘cYZ"p{d1R@z1hN6Pw,7,Ƌx-J3]ޱV!vJCabtO<[_!dLLc&qYl"}p*yだ*.1 ְ-*č-8)]1G=;;~poDsN}EЧ"yuA&l<^&a!Iť @X*^ 9f[j>i$>4-aj E*HU*RIq g6 ܈ 3#(8S]E}Lu`_& O~яvg`^0Ŀk'e1#,S:A ',{%ǔ"ȔzLp_#;VG7>XkO|5KŒ1>P=דW7c*x0xաE'?@mc̖g>T]%.9Tpr\"8b;x5}Mѷcoy犨ml4αDK"D%ů$XE]jWϑ)Hs>PT&}'j4NPL$4EUo*Z7#Zա5kTO٭o+߮=ュ,bS+1[3~0bUzqS!WfnwhkT )*ꂤHMdܤύhڝiV)Nj~OF3vwK–сW0A"3Lc'dp=50tWDd'x )襦\|i^6|tc7h< BT{ЦE,=h <Qzl?\}SJ%Mͬ 9 `: T\{ok`;PY9l(le%l5TuTlJ،cF%`5*:u1R4xƝXoBT89ڈ8w‡_a/X=Q44 SxuEQkK[)D?銚TApI/ѷ``xu/͏Ҝ藦 NƩz%(df14 xȯ]f$GpRa|h\5ޘ 1>|v__g[s9"r } *3 k tH 2w2M5z[@#91~'2ogt1e/zdJF`.%o 3t(;xnw/v#Ҡe3P7p48Llf]5*Φ? .?{:f4:)zOևIhI;0@p |yw{3R4ccS?GXql/{Y2#fs1s[홺".`"7M 5,(6@w+xw'؟*g3 rL! 9F{Gz=o>BlL?a;@t&#JA-9,Wm} fxۼe]~jO= oSς1345ٻ6r$rfao&w8;~AYd'WhI--vZ$"U}[/f?,\-n6u`}-vGo蚡Ǻx_Gg`;/q ~9^dՃ qS #w`)cy=&'r;COKݟ1'P%4j ;F;r ё>q9_^j:R`>0hCi,F7ܓ14` x gzgpm#Ŵ 2M䤣$ok礣+-[he h}zE]$j8Xf5~+\dJR~mOTek60=%(\EvM)+;& ңAޗC^dLc?XtBM "  bf:\sSZDŽ"LÎ)j8T+:/X4x.1y;Ӄ|q"w6drу .MZؐWBQҌgks_'5rΑΠ8ȩ󭅆h7wee+Gpe~,i=޲wvM=:/aQYM7|7aqs2_kdžkhږȜX?kn=wd.f3"׋J+m 0皫@:Woц)_obGĩf;dͭ巙_FV-N`hzm _W\.Ñe>+"rʵ#o^ys ϧYBev%T\sA_oOswB4OΧ]җ[JӗLguk<^NPIPo Pc_t贱;Ե]3?p!1DlA1o0(8pFPcv4^]jV2Ů*<׻' E Z&C iB]eyMʽzWT [甆(n}ƣnni׾ЁWCB0[]RR@&QzB[1a|24TW[(WT湼r8j=dbT\Iy|ڭuL@AsʹD*9seLTNe e, (SDyGAq@?i(|R*f?'s@y^/%ID0i#4s'GՖj~ջi3FTm&瑠"D ,yeu$2ge o) T&`bE p^ =*rD ЄƉĽr)2J;i(ȄllOW1s̙qs ,UMt fZpABaihQGWeEuB=YM4Cd*c .\@aLA ˜1ڐl\i]TZqvhf[Jω-*yJₚPPO2P"dԄgx[girv|堡#V)A*iZN15!Co/zF5hΈ :C*:C YƯzFzZDX,TC ;lt@R[,iX#h Z,È)&M !m9$zJ܏xδA.#y6@WJ3+|32!a$ CUal6(tY\ uV}'9l!eNq[۬F%p^ 0KZkIH  9C4xsrZ 5+>Yѱf.n<#ŠFe L u,h"[ # #dEY%4*VȆ=# |z[ Ч\N hF hG6'^7X! ׌*M.} LXOv" i Uy'vLN.&2Y!R7Ir=jI}\ctfRknP=nE2يhk#6"庀H}O3_rf\߾Okݺ|zL߻-67OjH[͕{#}\!F'\:R 0Z_ Z,x<.yFlxuu/nSD+1<]0=zkɞ'ϝ4N%>LQ_fE+?"}%~t;DeԘ\ކ$ cn(#BQ#^v$^4`O{сa).qWq7ѹNF_09ҌOI_QSiN1{vיX7lۣalJ[݌?t=Cx lS,-Ll &o>b(i73ZN|0{ӝaN>@cQbcA1>J٫%;B~&_=v.41(О-N架SC`;;nݳ]:S5on]M+ݻ۴فYeXwf@gߎ3❋erܼ&m@rrю褕.@PB`Wj$ r3AJ[euC,Q ׍X51:_w#F7 o)7FݩwWNP&j:*C3q/R.6M!f$ "iƩI(h!}_Ӭ]R˼:1Fδ6t1VYU.pIjW'T 4kS mFzrLڃW`@GD LjRWRTH< h$w:qM5:bhi &zG)uX! 
851b*A8))`NFcGL&&Ko}Igs Ǡj+9eyJByuȯx +\SIEcBO┵!*p"1%Г$">:Ы9j.E%DM -Q]qHH A.E6wW9-dSRk!ѵ])3+6\@]3rR!tb ?=ju;vg>4qgxeup;gb[-Bˏ[÷vΡu=0ѴgD#Kcou.`FDj*s/4Ǎ~]RRyL ]e"71mx|^)(thȤ >d2)5#5D&L)ݓS ,j1.\;m:HΙjR8&t3@E/kTP, Z5R׮z5,Vk5dv#`MpIrcDydA_m|{FwZ!fǙ,oMjח^PP:3uiv@<!AUMn5UDh)ۥ1i ;o6>cS2jEgﮙ< =OK/yB/oiV9R/M R5n |uTy4B:V\^m.關}i*^#թl~N*IjoWq>fiNE<|8k"F`,]QWWwLr>]B$mLuBROiÇr3xÕ8޲T3>xcG<*K|U4ͮWti׺:qKsLAn=[ϯ>aOvGlz|q/$;p1jxUۋ2nFv2؁\P"&sQa90Fjz)8*؁F4&^a/WG݌9Ӟ_>:fοZZc=P0,q^*9wpQ5|% 1cwrp3%yK!n7"ŢZ+Fr(.ۊHg,ᇖHIZ.B\z1ӻ Zד %ΙvIIE< DWR mb"-8 -NFS1u!+u>w[TTQY"YZ94zV8S\Q})D"xMWKΌ& 2I.AB7LyVi/3J2I4\6J>/Ab`lN.g$e֚Ugh>%#7Bko Nb"DR Rd(+5'}/ϪODF糛:u9J(/֭XJ A_>[~ޅAhpM<k,珍1j7kWV'%TWiԆXIn*¢UmXGZSl$ɛ[՛ܡپ?2 W5%]iORR-iMtV<ܡ:-{MbŇ`ݚb:MQǺ൨ެ[s&Lֆ|*HǹoxT֭)9u9)0ͺ5ʴnmh"h{ޝ.ےXiѯnׯJ7{2iЯ26O)2~I*&z Y zPq2V NUIpJ$ʍ󣯾-XOoEqCK/׺2w/}w|ۧume Ж?lo|6T] فxXr'BN2poTyEzB; X'N[{TwR-.~{ްz#|[bM\ŃtrDDJO;wDRT!P)R媐_ro~QyeGp{[h%uaItőkSTJq%[Rl"YeM qJqt 8?O( mp?k砽4X%!}J}4 ј\ҟ5r:7ֆY,şq{ZI4 `A.?+)8K/Ƭ#x\(*yG~nKJi_| 88`K/) $(_WP|?~{RQ}yQ5T˨~nMTQKۂj R[zǜVYہ;6L)| eԺ{0j a6>ys7&Oz|mQz-?/ Vڅ/KAT  7H}%kV=qf 98 8$z-f Ĉm2 ik puأG+iV⛰ ,[e As & 6GzMk:i0b@XNiJHOV!kPe|V?J6sD% #2w76,]qc5lbY$w5ݓdZrvӦ8t#-ee[ac,L4A9OYN9hխ}u4耬3ڹcc6dLʪx8*7[ʫ5m5yŒ trBN[B c>)s%Čbc쁹{_b50Ɗj-6M>7e7Wk,!ujgV]4Skm *ֵy^T<}w5)898=;6$J0;|rMd& *c[˂Nclx{hdFpН$M! L-r ϨB@%*IbqyL UA訵XP#id;@ 쏿:q`CӥHVz lhZq2E2ϪZ;],D-y#AK2ٓW洷@M&WGܞNXA-Fc*sJ/jB!2ir#`;/og#HJa+/wv2vk$&kI%)fTVDևU2F{e ]X-)Ѧ`a %V)רnJYFΙQf͇L86"y(eW8XœIeJm*BhzNg*Bf1T%π2?B'1X*n,俷>xYtcτ"-6Uxk 764˦򅦘Sn  K  s^Iu+,Kݲ^yW| gA9EfbhğiM@+~w&ZAKeaᜈ|CbחYpѶExy\ \YDeP\T6˹2k"b۩9&\o Wݴ=dU;u02s.3 ޔ?F x_LP/R{MիWE ceEA9iaR0nQXMcdЋ}}Wk,#[4Sg&`aĹ3dOi*~%C!$Q9bпK?b~~Uڛf}ko)aq7YHhv_WXiK%Ă\)+o+sQ-XNoו~&jM0|noc8JgQ.F7ῚpLR)DH-[&kxX? Lj0V'Q7dШ܉=$/  A3?)vvuai]Ww "BmPB"ce4 OiiUV I{Aѿ-+'ؽ|Vҭmϥ酮FoKm1F`IBy5ب,9Z~7雃p!KW3Md?<9ϥMl;Z\`L, R?Nkk2(Ҕda Z3"n!çc HwCk SM;tqcR8x~w+\_"%>r%/O?-o,b 19;3Z}%/8Ȱ""CxJI"7S 1X39] w*! )1woDvJ{AvHc8ʬN(9" iDJ7 9B,3 l=CU-xtoh<`ڲ1P8B[M+nQ4G0AZs3ooլ:k+R.)yˍpӭ \Nfn);EH&Qcn3Fyct0E<"f`J4#l6ABH# BXP p"ډIjOۄO'ZsőkܧrQIiǝ_rJi#5xuԍCܝLq\ yi>xA]I E;-#{! 
V?.8]tANoe5(+ ]YXwNpk*6CUƵOʐي|;łǽcUI7UdޕM,EmKh֡ kݷnU;ukA4FvHDJno4׺!_S Dxb0jgp\4Į zj޾Fol]#Q=WEt nIO_ݪ>Pi)D>;ZIouuoQGL.5MJL8.vb\H !L9mQ[sƕzagHk0Z$Q*D{]]F.S0U/ 4 a~zAkŃZ9?uҔ5`qW ƱH %3mjMi_[\NjƝQ9WR {"׆xM%A#Yz%!z] ԉ7!*ܰ&cmL4'‡@i a/,Ki KJM)i cXȮV!#ń|뺫: O9\Hz*YQUR0,y?5+`2 yVnWPxQ \T f|u;rX +;__(*V317띜S8> x@<Ҕb÷G7PmI78:,^6P*hs'ZKkK3 B2,܁oUoXSєю̞J^AedxLu5w1t͕rh֖>F8#.\F{;쎽9vhc/E^RJ>8qFY'{zS.{jFFz^fU齭`GUuOT$xvβxs,Db:ūWb\Vcp⣮FsUj!sYHjHhd]5sy1*Nv uzB[p`t9΀Q+"u dUIuI̪rg?tKRf`D F0V^TicW #V2 0(b),jR*%u:Ah| z:lш $}~bJQW 53K*{+ʗJ% p5`=1ɚgOq<\)N, Ic,F$lE3-aLIᰧxa=% V5rJ$2 ;R,dT#ՒM#T + QB# sW#A2 ُqNRͤfYj=<.GKqMޝe}0 )'\yr?.&JW⎖ukbƸXo-KbZW+mkMnk~v jAeu\u[Vp?f7?vG!(yUDR*ǚS8humq:{vsc̓b1>(B,/N vHO~MqYfHQ Ĭ}n]%7P.fOi<^g|â=ڽIyy͆>xlz.U$H (F#yr QImL5꾁Y+=ыjF#caSOh~HX!C+΃%/>=0"@:I^ .)&d$'>P ÞrU ) 9'jE48hAW|<̟nb P+*+87@ro'& S[4SZzT  v"WYABuԒcLeN˜Ts闢ruM,t0h &44j6!S2DS,Έ:z3#> ]=VgULpvŞnw%N#V!D10`oTj͈rR Պ+ [\S=%"e<"eZ .ր(F/]`}ʽ NRNJ_ 7 4(G@;.h=CS4Adc8U"'hMuZ9":1k) nυ^ F\q /lL)0™ 9IH2(upX?qT^Gt0dⲝ\wI-7"Z |(p$Uܝ[ܽ sdcyEsF d]R )qYŽR(OOиb/306t& }:S :ÐB5/V=GT*̱)M;^7sv%^34xb['&4'r=&5il~7,O?i%J6jla[np]>LTM{f*՗6YpW[ߪ:Č:}=Y2SGGm o0,Q2^bOn1*v(Gqm%NkRnFoLʭaRn0''jWs Q-lqJPs`ς0x۪bАT\aDZq-TuކdԝjHHNV\0No*g͍>^KjH|6$J9Ë6G8;d7FQ~ɲF/H͜i-)f048(5R{ꝑȉ Tby/Qjf 20Mu<兀=OV茯)Pr(`@k8gqCpTHH1@q1"Ywtr˜N؆H @Bq*G-3C8fVԂ = 7ө5n?."T]8e4[_иHbZV?>~M_?h7^ɺ%ӭ<1AF펿}4XJ>=G1OG]ԺU+o^eW?=O "(Hu_z^jR-*UҐ\Et^w˺aѺ^wDVAꔾu;b$9Ul4VδnChW%:%[뽬[ql~GL _ou%PK^s1 E,pWĄ`+ C_^ E1AFkX+"cR:NрA:?8%RLop8x;Z:H;ytKvnrPW}Нplk`qZ|t28iEz+C?ˀ;VsᐦEL?__O)(.Ѩ ҁ43\& "/wnYoݰyΦ&CD`rhk]΃uMxxYp!8p$F|Kz%aypF_r[E Nu.tSPc"N̅8_AպO&Ԓׇz d'V)jYBMi'_k%\Op<_AP" >%Lçe.)"a V4}sQ)w,Oz1Ci"z!\:j#ǂTSŦ!9m '\ ڥ4YIVFt ѲTɌk&N? D2 Pbce l? t[a3` "idC,n:q@;~h-j +^` 8Nd*:ݒ.8E5MBA\Oi厚"9|fߪRJX >BrITQ"|L~d=zfhƸ4R79LoFk>g'-n3q$,13L%k즇sHh!h$礱@uE2gs-r^{DFw>w>YuOLcM Խlph%o?y-uIO8j́m*¡oPt!-tVNp  RD>AX&jn@iԘu*"*j:ޱois8#IϺAôÐɈ '-e#aZn$9l8d tNc2kJ =9>Gr4,p퐘=`Im4b¾D2$NҳqN(x\]dUOgVYLpU'e+t^mm8cy=`;4J}u/+Og!Oyar r\ tvѢ7Drfsu@b%1). ^;rsMa6S1INƯO `('*$Q;$~~VEN$o(#_sU7ݒ4NT1 U`;,YE0#>c!A9lD&<4rtcpm_ΛyH#Xy{, ?o9ySl+j6Eʫ|:5|z|^||*Fwyz<% ks0I|X)txR|r)_ÔX~6C)ә\a\FRHf"2LE[- 1Nj4ⳞU}Ieޣ/$ogs"ii/ҽët*;j;\=!*ch!.X(&k ^j(hH3LH!E@1g ߣ?_|z/:u[(}F/>o\VaN⃎S)_HAǹK/ q R+@tʃ_L紵3{|oJ*۫m4AVyW AD-7h!H"RSki\$kNr%qȶi/*i/׏m@~G=oȗ: a|}zܜjYf4vZ8 RX A(eA3# NrZn0my*.L:1P~ި7k3m)mt M \ g u/n$)bAh|z3p΄"}vض|w֝btav>mWҧ.:8,23&?s&2rmC$; 2rnl;DZg2#K `8x'G:CrQG 3@ z]}7!,ҝnkpUøDqF7'(# T8p3>Ĩ2g$%1"ֱYBu0sr,L'0L{ns@lce3/2 1%L)kBDa}0WNe O{8F nDb}thdc** ~uɂk/HsN9WsWW@n:9sm=GLHC\l 8ciZ*HM,Q07h)CA MIr|0o9)CUӯ?2/ d;*o~Z2U7+mf϶HP)7-U)UCԶ:*+4܆\ &1cXzՃIU)=!ċpϿ <[:qD):)[->onq)S)|j'>B英KM:k$HĆn{:@I` L´C&`!ڏ:Lۢ{`v3iK( IH Yt 83LWRJzeA1<QO,O2"d(hH$A|ߍ}X)As1w=H尷q7_;,ff^y`@RԴwv_&3)mٖ۔(NA2cR"Y,)IF&Ƙr2ZVXSM=֩ @<7c1%RcJA=2׀Y1 ԍOD!DzĹ4цYUwdJ$Y{ߘ11&*`7CAg83Jnp+k3K+d@b 3&)pGĪU_޾1EccLOpXx(뇏 |Z86l!wzi7$֣l]AkR?ݒB$ߣ4Xxqroݻn@l x綱џgӏoX,Wz:>^owojffAl{kd+ܭA~=3w}ır&hK2(9Mh"&!XzQnmt5*{*stlY}=eޡS2@qI"mEԅi ^ %8sRY{(?Z*a<7PcucΖ{sK=jn۬>ӭ:@[|֔::yBJzP GOY vk2J.ϸ-o^|F3&@¸'~P;/10۪Nv}]/SFES#*LhmK@o9G5 {+l"dj- 'mY2-]0ZOg yFg kFϖ ?i|x@s _E9l_c0Khx5sHD(8T޽ c'G~ #Zp3Ҷ2I:UO eȽfRwWu[zNX`Ƅ:JXsEV {M^UelPӱrc`u9vZ cu.Z+Fvւ=Bk% M@!8b6CF1vUvCѯ[ b` /7~p:P#4J8Oc3_$9 @kAHs$UrΔ3-j!˅ib9qRc|5'OIX,dPk*5\X 8R4,b(I;Zgu/8B1g_;94Iss밄Kj*APg! LJ)V)zyI_)nknd˝y,M%w1>3ѡA?ywp+b^.dq+-Qqa5Rs 2Yu181vM[N5z&p.aA#g0cJ!(qnP di(ˣBW@6Y )BFkIiwpe+w1+\/^x^~\tM 8 yc2w;ܻ`<#d:xc>|Xt}c@sAaIOz;^0q &zDTOZ'cae6C>@bl|6RNz1ҟ0&Yg\'GyQto8rr=]^yd^=ڕ|777\#ȯ5 rAVaq:AٺȒ9НE _oK } 1Hħ698zvgtu5$@`(0akAպ,"\HgU4""5K~w*YPʘxӴG/g#zð@CZ_#&G^pd'B!? 
š MD${hpC=$[w|<yXHbֆ'l;ÚGͅHf3nA3ɉw y:nZcA3i$"e:pINc r5GIi6ɣTHIV+jcUmTx*$ t*lp4VaFABZc݆FTo7'$ˤ I~PHwލ >+['_ꐯF ST©׵z1.QB@B#kl|P/FzR?υz[<8fI E0!/0C$Legħu\eh#msAr.FjaEyf1i3$9yt":GecÖ a 8AL@&d$L<;"C>}AeBT+{?ύ>ɥt I(*'!C xwC-'C'jΨ !?bTTy;=:n MicV O$@i!6g)ma2 !)1݋]\˹G9a9Js C P. 9496ns~p!3!k|-[ucAp`5tW8Q;HĹ/&gcA27o<ٿwI9ny 5 gnVGI9dž',2rz#7ZZd7j?Mf7=_V|N6QLJlsuҡt  z"/';7bXg%3G$l.ɵH a%ܴHq"eLPH5LdܻQ|4<\1݂ahr~4=x <܎+K6U@9cPnG|'g )t{'mC.} U˻wlTv_]pz1-ux$)ܷ6j>&A_OtYN 0zWr}ub@ 3xow;۾nU DE+VD|_vӽ8EUVvPWjTDp* N AI;]+5*RwE{*uը ځ˜)R(;.ԕcUoxw5xQ HE CRVueZFc; oþ f^ fFj`˰Fm`*!:P6!à<'^۩|xjY|j&uT\:֞=ցh%N櫛Z˴ !7dsOcO`a)"~Lj//ݕ6u(,CG֦Glg/SKOKj7dcڍ=w?wY(T_`7k|*ƜXY |\40VCV$I'i(vVIǾ( p$5Ë &2V 5xЕ&&us P*;㦙 I !zЫs|y.Hzq\P_*ʎU𺆇Ay2Vw=^4 L . -; 9L7ػ^`9n^f+D_UIE/6T.[OA>5i#5tr`{:&0+4lj,M_~qɈqWKOcȐS;U9a=':AskHmHmOYm{@gl;: ۮuXe ecԏs vL45 ҈!tE-.PPrC>qR0!JQ`= ПJ_*ZKoy2Og^ě-._RՌ!"\MWD!^As^01vӰK@^3.,WL DN A kF4 ri̚LؙsmR1‰{oT[Q! -iBǔk0vh6zNN$PY ! yfu4(gslJ ˸iꔮݟ/[[!3r^(ϣ(|I2!:SHH!p C  GA1eZcI$nJ7aJIF5+/42xx?e+W#lѹ:JCDj~xSk?h%Lv9fl'1d)}[o.9`@ucԥ)MgAj}eg 0a>WisOuefW/FW09k  #Y1ŃVe}p:Y.|gQ[<62;|W24ff|Y%KUrU 6Yݍ\ <_A;J}gs,>_+qF%Q_x Fw{1u=0(%$밻dJ]%RdfR* J_AF0o04Ku^V5ĵd*,iUXTB^ZtB}c&x*3YMŬEUO#ѳǔ@~Ss333M%cYo³CT mH`[6bQhF,! qt֔؁:N Q$ijZܶtv@4rۿͼeo(!(׷r(29)mR» n;Kk=IŜ~54jKvqeyu!a]ٮWDh>LUY}q]ZLgE_8YIsVҘ`a2W"Is|iQLobt6}juZ6E}DU= `ѪxX\kA7{Ѝ=M=͉ZG/dqW}(^Trzhe0vPfkWDWwq4KMK$)8?}10;Zẅ́ǎXZf@ L~6D!z1*C$D_inEDxua>L~&/ߐM(P Y9pò4Ƞ:4/BdCcj*WR*t6&Z%>Kο-mnAÖSazM'%p5۶ImL|\F66*㵱@K"~GAD(؀BY# #e+,ݛ'"r<,hQcsڐwm1,!RP,S ~z2J*A_}OEԪ%GyZiKk\^!8 ]y$1 Ιb\K!~*lOԺ3#7>=y Co!'tL~2=#5\|>5A2f8{IUYy )BlUpvpǙLYLISB \LeJVAK"%Pմ4!_KY;脸\\ t9×BCi.I)!%̑CVP $ Q- +̉(2 .h Avڂ:K90(V6T1<z˧|X =dǿ=DP=zn]큅~;)t@54C@y@A٬kV{{t‡p;)o %wFK~??絀dZP՛ͷ?CQ 0G/]xJ֝@}OЉQ`B'&o8VĦgΛUZ}&o8ynyjQ_8 WrZ\3iY"$cC$%K%.KBNb2s|͋k)^\gwaa!āEMEl)eT F.WtFZ*$]ŹrfZWCoi XËGʇ9Cl8bb|j^4BƁ__s R0la5 KW"DX!XDR6ݘqyH+ [Pây0콋[Wa1|ZiT6U?2xPթ! Xs7Sbu=Lb ي@蘾VU ,$致i i Wq h[;'͓oඉym v]?A1z&1a~Ɋ݂^QD0Vj`Qpi<(w}~1Lz{>Vm.ioC4ct爜T,qY$ &e DPsRH!7ʲ0j&SDI2s, 0D̤R$3V\RpJ.i6 SMEZ+I1HD^%%&qgpBB(.3!"BKb8(`I($Iir\bxRp Ha#ئB`\o7[I8(+yY(PK5ՄL25ijN9ƺm{"O.Re%,h4ZJ=D)qz$jP]"~2r;xOe[Gr2S!E IquCz7&Akj=2 b}\Wx=F5ރ/%8=1+rskov|:_DAs뤈PDtNۖ3 ЌpFU p$]բ .j3IPjUOAJ,iVXɀrDJX2SDzl/A^"^l[hUt\"5Ou"]FnLնߋ˄6L .ϑzZ'Rr!X}SyYP۟ la% P"gWA( ,2Ḡ)EgВl`N3qYoM $vEDp{R|P; n6ģ73|rY^!>IqYpfkl`VNh6(YWs+b^rAy U!B 0 rq#!T 4ULh)Ì8wA#s*iH@!8ALv߇`OIiH@k {;R8RFFUH`9 *xTuPFB[ae ,1vC}y + A( +7z t`4d'thʻjrJ Uω-%H['Ԕ]}_]*xs炳;_j=sD3 gR`y5O6⥭wu#Ռt֞ ZC(̲"*t@$6W!`_-y^2jMeĔ&K:52Xp;^"@%k77O,o "7>vqӴjd<(D`ZtM]hmmEJ}{j2RozFC5co\@ft <,2Vv@-5cZH-jRhUq:)槉)ZD:`mH *>8[0@賻~V~+Sg0GKv :cv2tn ;\XH;F<Ot֝NGEy*u*浵w<*4\}*}.f5U.o=W>DT(wp-C抁qmƺӿ֬)u1B^uS<#:+FkY]10Z۰ڻjǮݜ[LnF+NcJ0򭬛$AX7W Lk6֭ED)@k=к!|:)IIM-Fs&Rcz 5F/5y3#?/|?3}~E-΄*eӻ%IbvTLKP!͘m?'*t3Y%ZI/#v5M8Bҡr\Mյ)92י雯!דNm:[/0xdQ9}_ d-b`(<OrXdh#fSVh\.5}/:/T["u.?یK0GG4~? 
\+YXXj hL :^'BDS%x~[V/"J0u?Fa{p/q>'rDD{zl\7IQK~}EjEug}V Sjeb IIE,qF4KNP:$㨵KhBX{1tnd4T_~g&6ᬑ)"Shj%ksP&X%j}C #idAob6W=EYa`hy.VHCP?hy|oh܏G?S*or4Æ`=9 -قW!XAX~N& `*;'9v/B(nޛK격5 ]Q]P ߨbC_[mȶ Kۚ37gydkK@>"@5Eě|o"ѕ%h4?V?kٗF2h%pMuVm# CH-}[fs~*@n 6aSضSac Hڑ?"6sNU!γ ixӕ ݈-ybY(auUc 6MTbcJ6".*Vo-؀@!@ <4ŀ 4[j)5ӞB UíkCIšOo9eBṖpTn]xIkTj$aPfӽlJ :&vNFk{9=]H6 =?v("{jIp$Ut5*~H ECtOB,-r@#-dbX lQ.a%)R*W;ls !o2!N~1A\6Rϑ ̆!橣 NX619Pt$#ͯ^WL2ַW:F1 ^umɱȈt\.g-]91URFME0% ̓rDkrWB\r'ΕRJw:}uٻmeWm%@>(N`~ D*k˶eǖbɔdIJH>3 g$}F[cI9`Q.PfC),(,oRR\UU}3M>ENn_u.;"*!WMRKd-EްEi:/UIKQJ'qDV8ZbdSbFU #Hy =AFĉ%xSU8)'=-ge*%XRHc&\`;#817qbNa2jBDBB+I),bp kAņ2'ӎ`%d 'XB+qpJ M5&UD[Q}L[,HaR;Nb!hary8#khko;QUg .u~US[NtCcea7|_u*soLt?6hB~v3:p%ˋ`yr 劢dgsݨۜ*QlP}nW k`^t̡.Ze0]E/}LEP|طč?/ӑTAVɻ_> 02rsyGgDJc<^({PhC]p;E޿@pRaUajwNO]C}7sߢ --Ӽ}!޿F-!%/6b*oYN']U!^%)2jZXX\`C+pĒe$F9Vռ0+VȵiIۜ誦-!-|]ҪصIBZF0EHY#@+Ftp1@`0fYo^9b~u}H"n$ҡVbg >_~!H+TGثV B "EI/[,ij{ߺ.e+!qK-ĔN*7*vaD(pcI $Ay][8$cP# 6$,%-AonƥoDhjӠ2[t9hNcl‹M5(ZM\jUӜOѣ%sFUn4 ̓^Gـ~dkDS.%NVZrx£Χ4U|> O=)FK I}԰Ԇs3P`)2OL8%N%&~ibPDwH$ݺP @Nڅ`>[Ϙ4Mj KLVW&אG08sy__{$ǫq:,쩚TTUs*wb{}v^W}^K= cK:%PP"v삯(kVP fA,h)߭i'O~anjSc5F(,wS{{x`{s΁c2 M]A X&H.8r<;zx)HP9'Pc!\aiCYr[HTЀ%>!yϤ9T}Vn(t|4l4Vg@]RϰܞUfXXFPΙkDICst!V<{>dRB}| {yy?]1@wibMsH={|^*LeGnh.)ۗ-ع΄ij{ziŵCw9HK^[?ȳ&}w0RWmMlZ2S \ߝ\T\0GYvVg~:UgK?뢫?ٶu~NF;jVc{r4>B6VvL2=yډ^->72pǹbQ0uWW!HCr )}vFkhuM+m~P!6ʘߓ*]n[V!_ӭݲnU[(ET'2t{^4f‰ Z*4+W ҨMɥNi1(:mn'/{h6jn]Z 4+WuJj:e|'u˯n RՌk% hIypD6y0@hbPϕuݷXg)S4?}Gx0UJr<駫g3x/BܸO.?ߖ4Jȑ a, z~~Eii׍˟DQ6k1&-n2u)H~.l{sϛ buq"("Ř&hiƆ IJVPXâx͗qE ]FK/ +~"|06lpMzpD8#sӫjE߯}ZE^mQ5̂U&461IRfu.D8Ơ*Ͽ $V 4AjPچ6~b,zu@ø`\d/`׿WRDڄ 3mI//dbïT ̕aç) x K&m7n0cm'0ɤu9Ƶ(\J:yC0'E«"h1ɴ5 1FʤTg2fBlX8cES4"M Z g1-֮f҆ɗtl<ٜzSl䅿x:y$\xcw}#$l ~{` 28S "wwpMd|BqcEf]ƻH8u+uK˶uy. &j+0{XKS.[#~{%-$;^9B J& *#WW ׵.5i 54On~j|Yk?_LÄq43a^2-@M!>zW˘b#*G,M*SYryqI JQ"d JcB"_K T Xj4|jU6lhW )gm~7֊Bl+z -c6XܐT+c p"BT8):*5LDHϒXeNUnY $s9\kL! qӏ=dzM.,A%ך+gfP2PˆvNqdkE<ڣ> ;%ЗTfhu&^pbtwWrL===lzey2o7Gj1/~`>w~R{忔Q0(Z! ixIE*u.?kYO?pyJ`E:~VRKU[8视8X5l}%?gx+]-jEPj8?xDf@ba.k!)UʤcK;LS@Ɛxai#RH7(։.46r&)I Klb(F`L@ YPʰ&W* U=ʵ]!`(LJ!Evm3Qh{p#x*0̞;w:x R`\vUY2y6kF6x[$ kۂpZ5 Q,_EQ\D8 vǨoݺbj~ʟzR5rDlHd1-"^+v9] he}О x qpnuVB^j7IFlzڈitz*v6" 6j!L rA!1ZCޙwR@Togۼ{r\0+pl|bۋ~77݋nf{F>ᧁ<iq*dˤTZ=ѸVM7/-EUWWrn\DT,QN[GI{y"{n%!Z<$ͨwl^j[]hZ~pJ)G>4-B59JɑSnݼ0<2>Nkwn@L֙$:4I@sJ{}vKUmBz-0:OWc% zE컶FKW5V9̜TUJVUaX*-+BkKmLiR7&P0qw7~kc`280~@%{>Y뺐Ofm:ٻ6ndWXz9[ T즎O/v0ڥ.!)'9 eiDIsa4yP$4@7ݏx_yv`^yviz)`J?P^^M2zP}o{c!ORT/ K;k⇢MX^44}ViN2Us5L[+5~(߫aJj{"l h&*1[S`0r8Qq3s~wjo|Ͽ~׾mW'.j5K,MJԱ32!@O`R{`,Dh퍀ƴ|NՒCVXdA Ei+,`ܒ-Z|mM~T([4ZxH;B>K E[57 G ogշ/0ieD;ZQ%x9Idqi|bd{1n"r?Bv>g29]}ͧ-s3 uZ}@>$cvnt88R< Urs}jM bvB]'n<1&jVc7.BR& y*SAc=޵n1XT bT'Mpy'[zkuˡ!/\EtZ7^ɋкb:hb݆ ֛uKun94䅫hNXv -I}F6A44a˺GZU4I$8Q^j"b4YgUI6O|@vKM|:EG CO 3 p~|&?!LLOTO3ZdtF4 YgD''HR7 c&INO3O3srz~y&?!LC'?a2S&d&?!Lql/sBRS9]%_#t_|][ʏVio_z o53!/S#R³[|ޮ~5YxZyPv }3J+G\mYIg' 1zǐfuڇAq+W/b #En*&ݍ + Szi8%/B "Q HȖDN(fh) !F{m*BN EKh",RK\k؁c ZnPFX9 nٱP(=vp{cjjzqlВ9 ) 2F)%)#^@WJZĂm ha a%l=%CG póC\Z **E|E/tI2-f1oƈal`j?7zc>Yo ƌ_5C07~wJvVQ3ʢ) 2A"-awat,13L緟w5;s{kB {1^3= |=CZ>iǽo IOd#] qogW}>q~f}3|߮j~ߙĖ8cM܋Ѱ5q3˺W[ػ: v<̞JO^z{ }}!6|sұ-/2b:HpݽSZ/!RQ(G1]u/5o6B"(Dvg/;o읾 &OS[≠E?c xN1a:mܲыwat'}X`Rqr% Yw7dnÑ0~0;mpykz߽:ܦ`XD2 Bh6Rb$S.7,ׇ$, ܪpnv="8 O$R0TW.  
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[1800759065]: ---"Objects listed" error: 14353ms (16:31:39.751)
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[1800759065]: [14.353443879s] [14.353443879s] END
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.751357 4995 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.751951 4995 trace.go:236] Trace[119941472]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (20-Jan-2026 16:31:25.857) (total time: 13894ms):
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[119941472]: ---"Objects listed" error: 13894ms (16:31:39.751)
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[119941472]: [13.894826444s] [13.894826444s] END
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.751978 4995 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.752400 4995 trace.go:236] Trace[1186793130]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (20-Jan-2026 16:31:25.369) (total time: 14383ms):
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[1186793130]: ---"Objects listed" error: 14383ms (16:31:39.752)
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[1186793130]: [14.383181772s] [14.383181772s] END
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.752442 4995 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.753346 4995 trace.go:236] Trace[1703960949]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (20-Jan-2026 16:31:25.847) (total time: 13906ms):
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[1703960949]: ---"Objects listed" error: 13905ms (16:31:39.753)
Jan 20 16:31:39 crc kubenswrapper[4995]: Trace[1703960949]: [13.906055082s] [13.906055082s] END
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.753368 4995 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.753381 4995 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.935264 4995 apiserver.go:52] "Watching apiserver"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.939717 4995 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.939949 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.940330 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.940385 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.940480 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.940499 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.940605 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Jan 20 16:31:39 crc kubenswrapper[4995]: E0120 16:31:39.940835 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:31:39 crc kubenswrapper[4995]: E0120 16:31:39.941020 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.941048 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:31:39 crc kubenswrapper[4995]: E0120 16:31:39.941242 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.945438 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 05:31:37.382116731 +0000 UTC
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.945823 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.945892 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.946002 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.946286 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.946345 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.946376 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.946414 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.946492 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.948180 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.979161 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.980486 4995 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:39672->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.980545 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:39672->192.168.126.11:17697: read: connection reset by peer" Jan 20 16:31:39 crc kubenswrapper[4995]: I0120 16:31:39.991589 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.000205 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-pgz94"] Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.000463 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.003970 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.005146 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.005248 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.005312 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.018719 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.029088 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.037219 4995 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.040115 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.047423 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055584 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055653 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055676 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055692 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055708 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055723 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055740 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055754 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 20 16:31:40 crc kubenswrapper[4995]: 
I0120 16:31:40.055772 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055790 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055809 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055823 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055840 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055854 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055871 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055884 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055898 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055913 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 
16:31:40.055928 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055943 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055958 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055975 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.055991 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056006 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056010 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056020 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056087 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056162 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056188 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056212 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056271 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056277 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056310 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056328 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056346 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056363 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056379 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056394 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056420 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056437 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056453 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056477 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056469 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056494 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056516 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056576 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056597 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056617 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056634 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056646 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: 
"5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056651 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056682 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056701 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056719 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056738 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056753 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056770 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056786 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056802 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056818 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" 
(UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056833 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056913 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056935 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056951 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056966 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056985 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057000 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057016 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057031 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc 
kubenswrapper[4995]: I0120 16:31:40.057050 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057067 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057089 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057104 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057141 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057159 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057175 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057190 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057206 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057222 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057239 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057255 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057271 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057287 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057304 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057321 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057337 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057358 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057394 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057410 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" 
(UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057425 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057440 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057455 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057470 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057487 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057504 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057519 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057534 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057549 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057564 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057583 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057599 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057615 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057632 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057647 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057663 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057678 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057694 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057712 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057728 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod 
\"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057744 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057763 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057782 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057802 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057817 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057833 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057851 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057866 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057905 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057922 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") 
pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057938 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057955 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057969 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057999 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058016 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058033 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058050 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058068 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058090 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058105 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058143 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058161 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058177 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058193 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058211 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058228 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058244 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058261 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058277 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058294 4995 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058311 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058329 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058344 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058363 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058379 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058397 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058412 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058427 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058449 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058467 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058483 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058499 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058524 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058540 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058557 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058593 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058610 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058627 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058643 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058662 4995 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058678 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058695 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058714 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058731 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058748 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058765 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058783 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058802 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058818 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058833 4995 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058849 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058865 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058883 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058900 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058917 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058936 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058953 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058971 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058988 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc 
kubenswrapper[4995]: I0120 16:31:40.059006 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059023 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059039 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059057 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059077 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059096 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059297 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059319 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059341 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059360 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod 
\"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059382 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059400 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059419 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059436 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059453 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059469 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059486 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059503 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059519 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059535 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod 
\"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059552 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059569 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059584 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059623 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059643 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059665 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059684 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059706 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059726 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059744 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d87w5\" (UniqueName: \"kubernetes.io/projected/32295b75-631b-4da8-9396-c942306f0d57-kube-api-access-d87w5\") pod \"node-resolver-pgz94\" (UID: \"32295b75-631b-4da8-9396-c942306f0d57\") " pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059765 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059788 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059810 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059828 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/32295b75-631b-4da8-9396-c942306f0d57-hosts-file\") pod \"node-resolver-pgz94\" (UID: \"32295b75-631b-4da8-9396-c942306f0d57\") " pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059846 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059864 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059881 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059900 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059916 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059963 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059974 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059984 4995 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059995 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060006 4995 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056659 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061319 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056918 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057039 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057028 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057190 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057322 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057379 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057440 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057547 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057586 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057727 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057857 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.057993 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058004 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058015 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058200 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058217 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058365 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058450 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058606 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058636 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058717 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058758 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058845 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058850 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.058894 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059034 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059082 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059161 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059176 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059307 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059316 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059438 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059441 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059654 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059783 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059794 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.059833 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060003 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060018 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060037 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060431 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060929 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.060958 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061025 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061059 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061300 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.056796 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061372 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061659 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061937 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061945 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.061960 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.062146 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.062229 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.062268 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.062333 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.062454 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063112 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063118 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063187 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063378 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063251 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063516 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.063558 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:31:40.563540114 +0000 UTC m=+18.808144910 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063622 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063632 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063553 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.063742 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.064491 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.064706 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.064921 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.065337 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.065381 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.065499 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.065604 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.065809 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.066139 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.066449 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.066471 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.066715 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.066800 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.066833 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.067251 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.067264 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.067461 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068037 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068160 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068246 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068409 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068734 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068931 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.068982 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.069006 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.069286 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.069337 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.069345 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.069434 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070321 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070349 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070430 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070565 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070681 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070704 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.070726 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071000 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071070 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071178 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071196 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071232 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071403 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071529 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071538 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071569 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071592 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071733 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071749 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071850 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071916 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071941 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071952 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.071997 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072212 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072230 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072270 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072288 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072560 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072594 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072845 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072975 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.073024 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.072579 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.073606 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.073663 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.073795 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.074180 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.074241 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:40.57422245 +0000 UTC m=+18.818827326 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.076204 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.076766 4995 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.076820 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.077136 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.077156 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.077374 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.077584 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.077767 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:40.577742937 +0000 UTC m=+18.822347853 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.077589 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.078466 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.079220 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). 
InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.079460 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.079592 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.082903 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 
16:31:40.084434 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.084920 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.085510 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.085529 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.085540 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.085582 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:40.585569404 +0000 UTC m=+18.830174210 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.086791 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.086804 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.086892 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.086920 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.086929 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.086958 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:40.586949373 +0000 UTC m=+18.831554179 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.087487 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.088270 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.088520 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.088525 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.088792 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.088879 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.089333 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.089550 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.090064 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.091190 4995 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd" exitCode=255 Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.091378 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.095386 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.095602 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.095618 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.095763 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.095950 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). 
InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.096426 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.096871 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.096923 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.096988 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.097180 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.097596 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.098783 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.098911 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099102 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099546 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099650 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099700 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099796 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099801 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099836 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.099849 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.100580 4995 scope.go:117] "RemoveContainer" containerID="c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.101081 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.101271 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.102183 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.102797 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.103249 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.103332 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.102376 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.103832 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.104043 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.105671 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.105768 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.105865 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.105932 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.106127 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.106244 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.106318 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.106469 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.106561 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.106685 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.107530 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.109781 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.114614 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.126464 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.133160 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.135448 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-2
0T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.136551 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.144608 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.152298 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.158449 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160584 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" 
(UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160707 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160731 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d87w5\" (UniqueName: \"kubernetes.io/projected/32295b75-631b-4da8-9396-c942306f0d57-kube-api-access-d87w5\") pod \"node-resolver-pgz94\" (UID: \"32295b75-631b-4da8-9396-c942306f0d57\") " pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160740 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160773 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/32295b75-631b-4da8-9396-c942306f0d57-hosts-file\") pod \"node-resolver-pgz94\" (UID: \"32295b75-631b-4da8-9396-c942306f0d57\") " pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160874 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160891 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160902 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160915 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160999 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161016 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161027 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161038 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161170 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161184 4995 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161195 4995 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.160928 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161206 4995 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161247 4995 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161263 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161277 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161290 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161302 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161314 4995 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161337 4995 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161097 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/32295b75-631b-4da8-9396-c942306f0d57-hosts-file\") pod \"node-resolver-pgz94\" (UID: \"32295b75-631b-4da8-9396-c942306f0d57\") " pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161350 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161446 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161729 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161786 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161797 4995 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161806 4995 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161814 4995 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161959 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161970 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.161979 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162020 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: 
\"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162030 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162039 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162049 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162058 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162067 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162105 4995 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162115 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162124 4995 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162132 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162149 4995 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162159 4995 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162192 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162202 4995 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162210 4995 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162219 4995 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162227 4995 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162239 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162283 4995 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162386 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162689 4995 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162777 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162790 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162798 4995 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162807 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162946 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" 
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162959 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162967 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.162975 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163004 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163013 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163021 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163031 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163042 4995 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163226 4995 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163370 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163380 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163405 4995 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163431 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163439 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163474 4995 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163483 4995 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163491 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163500 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163509 4995 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163562 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163748 4995 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163850 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163903 4995 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.163944 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164147 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164157 4995 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164186 4995 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164196 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164315 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164326 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164543 4995 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.164649 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165194 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165210 4995 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165301 4995 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165370 4995 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165381 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165440 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165450 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165458 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165467 4995 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165516 4995 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165525 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165549 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165782 4995 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165793 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165804 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165869 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165878 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.165887 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166028 4995 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166039 4995 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166048 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166057 4995 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166107 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166116 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166129 4995 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166138 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166146 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166154 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166205 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166215 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166223 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166380 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166391 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166399 4995 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166434 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166443 4995 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166457 4995 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166464 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166475 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166484 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166494 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166524 4995 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166532 4995 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166540 4995 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166578 4995 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166811 4995 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166822 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166862 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166870 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166878 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166887 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166895 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166965 4995 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166989 4995 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.166997 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167006 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167015 4995 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167024 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167032 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167068 4995 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167157 4995 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167167 4995 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167175 4995 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167185 4995 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167193 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167202 4995 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167210 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167218 4995 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167900 4995 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.167996 4995 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168009 4995 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168018 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168026 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168154 4995 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168163 4995 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168171 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168180 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168189 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168198 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168207 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168215 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168223 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168231 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168247 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168255 4995 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168263 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168271 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168279 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168290 4995 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168298 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168306 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168314 4995 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168322 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168330 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168358 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168366 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168375 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168383 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168391 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168400 4995 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168408 4995 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168418 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.168079 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.175990 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d87w5\" (UniqueName: \"kubernetes.io/projected/32295b75-631b-4da8-9396-c942306f0d57-kube-api-access-d87w5\") pod \"node-resolver-pgz94\" (UID: \"32295b75-631b-4da8-9396-c942306f0d57\") " pod="openshift-dns/node-resolver-pgz94"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.176750 4995 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.176845 4995 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.178325 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.178376 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.178385 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.178398 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.178408 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.180567 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.188095 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.191481 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.195900 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.195934 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.195943 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.195959 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.195968 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.196936 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.203724 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.206964 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.207309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.207328 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.207344 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.207355 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.215655 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.218826 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.218868 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.218877 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.218892 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.218901 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.228859 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.231823 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.231845 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.231854 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.231865 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.231872 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.240695 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.240802 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.242378 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.242402 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.242412 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.242422 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.242430 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.253008 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.259886 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.265524 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 16:31:40 crc kubenswrapper[4995]: W0120 16:31:40.265820 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-ce35ff59a66843049ad55f5bdcceb975e65d86ff224d353de77fe8f8b9cfd789 WatchSource:0}: Error finding container ce35ff59a66843049ad55f5bdcceb975e65d86ff224d353de77fe8f8b9cfd789: Status 404 returned error can't find the container with id ce35ff59a66843049ad55f5bdcceb975e65d86ff224d353de77fe8f8b9cfd789 Jan 20 16:31:40 crc kubenswrapper[4995]: W0120 16:31:40.277253 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-c0fd84d3ba155bce26b439c5a4e19726f25d3f9ed7ef45f5b14530f352c2f511 WatchSource:0}: Error finding container c0fd84d3ba155bce26b439c5a4e19726f25d3f9ed7ef45f5b14530f352c2f511: Status 404 returned error can't find the container with id c0fd84d3ba155bce26b439c5a4e19726f25d3f9ed7ef45f5b14530f352c2f511 Jan 20 16:31:40 crc kubenswrapper[4995]: W0120 16:31:40.288372 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-7f3bd7c72111976ab18a63def8464fc708f911fa12dd0780cfe3cb3280fbd4b8 WatchSource:0}: Error finding container 7f3bd7c72111976ab18a63def8464fc708f911fa12dd0780cfe3cb3280fbd4b8: Status 404 returned error can't find the container with id 7f3bd7c72111976ab18a63def8464fc708f911fa12dd0780cfe3cb3280fbd4b8 Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.313692 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-pgz94" Jan 20 16:31:40 crc kubenswrapper[4995]: W0120 16:31:40.344264 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32295b75_631b_4da8_9396_c942306f0d57.slice/crio-47fd04c610d129bcb5868ae400fd1babd14e4cc2b46c95f2852dfd0ed409b113 WatchSource:0}: Error finding container 47fd04c610d129bcb5868ae400fd1babd14e4cc2b46c95f2852dfd0ed409b113: Status 404 returned error can't find the container with id 47fd04c610d129bcb5868ae400fd1babd14e4cc2b46c95f2852dfd0ed409b113 Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.346716 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.346749 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.346760 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.346777 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.346790 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.452434 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.452466 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.452475 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.452487 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.452517 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.554206 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.554243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.554252 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.554267 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.554277 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.571329 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.571523 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:31:41.571496117 +0000 UTC m=+19.816100923 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.656421 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.656477 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.656527 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.656549 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.656565 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.671797 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.671858 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.671884 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.671909 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672026 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672044 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672057 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672131 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:41.672114272 +0000 UTC m=+19.916719098 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672528 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672577 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:41.672564775 +0000 UTC m=+19.917169591 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672613 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672639 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:41.672631637 +0000 UTC m=+19.917236453 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672690 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672704 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672715 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: E0120 16:31:40.672742 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:41.67273424 +0000 UTC m=+19.917339066 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.701602 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.715484 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20
T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.730320 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.744825 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.754212 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.758584 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.758630 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.758642 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.758659 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.758671 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.774213 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.788618 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.817151 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.832516 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:40Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.861141 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.861183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.861199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.861221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.861237 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.946437 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 20:44:38.9379217 +0000 UTC Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.963051 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.963116 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.963129 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.963146 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:40 crc kubenswrapper[4995]: I0120 16:31:40.963157 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:40Z","lastTransitionTime":"2026-01-20T16:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.065687 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.065728 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.065736 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.065752 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.065761 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.094695 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.094738 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"ce35ff59a66843049ad55f5bdcceb975e65d86ff224d353de77fe8f8b9cfd789"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.096081 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pgz94" event={"ID":"32295b75-631b-4da8-9396-c942306f0d57","Type":"ContainerStarted","Data":"81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.096104 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pgz94" event={"ID":"32295b75-631b-4da8-9396-c942306f0d57","Type":"ContainerStarted","Data":"47fd04c610d129bcb5868ae400fd1babd14e4cc2b46c95f2852dfd0ed409b113"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.097778 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.099542 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.099976 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.101256 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7f3bd7c72111976ab18a63def8464fc708f911fa12dd0780cfe3cb3280fbd4b8"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.103061 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.103161 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.103190 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c0fd84d3ba155bce26b439c5a4e19726f25d3f9ed7ef45f5b14530f352c2f511"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.104623 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.116875 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cer
t-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.136030 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.150701 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.162547 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.168317 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.168363 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.168375 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.168394 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.168406 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.178687 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.191551 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.204027 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.214785 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.226959 4995 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.245487 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.255600 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.270481 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.270550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.270559 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.270574 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.270584 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.272563 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.283807 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.293497 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.312727 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.325486 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.372108 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.372146 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.372155 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.372171 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.372181 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.417666 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-ns9m2"] Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.418005 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-vj5zz"] Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.418399 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.418811 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-vlvwg"] Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.418987 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.419095 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.420461 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qp9h9"] Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.420554 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.420585 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.421158 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.421659 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.422104 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.422327 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.422995 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.423010 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.423133 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.423258 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.423409 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.423899 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.424533 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.424540 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.425253 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.425366 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.425535 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.425587 4995 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.425689 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.425740 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.440179 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.461128 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.475527 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.475576 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.475587 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.475609 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.475621 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.478950 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.478984 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-proxy-tls\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479004 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cnibin\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479024 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479043 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-cni-multus\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479059 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-kubelet\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479086 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-cni-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-netns\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479252 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-config\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479279 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-systemd-units\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479302 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-systemd\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479320 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479342 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-node-log\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479360 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-bin\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479377 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e82420c5-a3ae-43ea-a208-b757794521a6-ovn-node-metrics-cert\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479418 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-ovn\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479435 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptmxc\" (UniqueName: \"kubernetes.io/projected/e82420c5-a3ae-43ea-a208-b757794521a6-kube-api-access-ptmxc\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479452 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cni-binary-copy\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479470 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-cni-bin\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479489 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-netd\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479506 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-conf-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479592 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-os-release\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479650 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5008a882-4540-4ebe-8a27-53f0de0cbd4a-cni-binary-copy\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479687 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-var-lib-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479715 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-log-socket\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479739 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-os-release\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc 
kubenswrapper[4995]: I0120 16:31:41.479774 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-multus-certs\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479820 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-slash\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479842 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-etc-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479874 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479898 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-socket-dir-parent\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479920 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-k8s-cni-cncf-io\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479946 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-hostroot\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479974 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-cnibin\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.479998 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-kubelet\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc 
kubenswrapper[4995]: I0120 16:31:41.480020 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctcsr\" (UniqueName: \"kubernetes.io/projected/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-kube-api-access-ctcsr\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480042 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480065 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-daemon-config\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480092 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z87zh\" (UniqueName: \"kubernetes.io/projected/5008a882-4540-4ebe-8a27-53f0de0cbd4a-kube-api-access-z87zh\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480136 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-system-cni-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480159 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-system-cni-dir\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480183 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jq6l\" (UniqueName: \"kubernetes.io/projected/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-kube-api-access-7jq6l\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480212 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-rootfs\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480231 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-mcd-auth-proxy-config\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480251 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-etc-kubernetes\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480270 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-netns\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480289 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-env-overrides\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.480309 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-script-lib\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.490563 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.543365 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.572584 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.578322 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.578370 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.578382 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.578398 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.578409 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580641 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580743 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-node-log\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580773 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-bin\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580797 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e82420c5-a3ae-43ea-a208-b757794521a6-ovn-node-metrics-cert\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580820 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cni-binary-copy\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580854 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-ovn\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580875 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptmxc\" (UniqueName: \"kubernetes.io/projected/e82420c5-a3ae-43ea-a208-b757794521a6-kube-api-access-ptmxc\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580897 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-cni-bin\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580918 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-netd\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc 
kubenswrapper[4995]: I0120 16:31:41.580944 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-os-release\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.580989 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-conf-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581028 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5008a882-4540-4ebe-8a27-53f0de0cbd4a-cni-binary-copy\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581048 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-var-lib-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581069 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-log-socket\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581090 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-os-release\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581138 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581171 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-multus-certs\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581193 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-slash\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581214 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-etc-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581236 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-cnibin\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581256 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-socket-dir-parent\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581278 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-k8s-cni-cncf-io\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581300 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-hostroot\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581320 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-kubelet\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581341 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctcsr\" (UniqueName: \"kubernetes.io/projected/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-kube-api-access-ctcsr\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581361 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581381 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-daemon-config\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581399 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z87zh\" (UniqueName: \"kubernetes.io/projected/5008a882-4540-4ebe-8a27-53f0de0cbd4a-kube-api-access-z87zh\") 
pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581419 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jq6l\" (UniqueName: \"kubernetes.io/projected/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-kube-api-access-7jq6l\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581439 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-system-cni-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581459 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-system-cni-dir\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581484 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-mcd-auth-proxy-config\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581511 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-rootfs\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581531 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-etc-kubernetes\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581551 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-netns\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581570 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-env-overrides\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581590 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-script-lib\") pod \"ovnkube-node-qp9h9\" (UID: 
\"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581631 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-proxy-tls\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581653 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cnibin\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581674 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581700 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-netns\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581719 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-cni-multus\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581739 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-kubelet\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581759 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-cni-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581782 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qp9h9\" (UID: 
\"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581804 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-config\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581826 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-systemd-units\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581851 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-systemd\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.581932 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-systemd\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.582020 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:31:43.582001572 +0000 UTC m=+21.826606378 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.582051 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-node-log\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.582093 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-bin\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.582959 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583016 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-rootfs\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583059 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-cni-multus\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583063 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-netns\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583714 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-kubelet\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583872 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cni-binary-copy\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583905 
4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-cni-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583956 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-ovn\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584007 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.583613 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-etc-kubernetes\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584031 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cnibin\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584096 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-netns\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584187 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-system-cni-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584231 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-systemd-units\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584274 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-system-cni-dir\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584338 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-var-lib-cni-bin\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584363 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-multus-certs\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584419 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-slash\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584436 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-env-overrides\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584504 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-netd\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584547 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-etc-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584645 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-hostroot\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584659 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-os-release\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584669 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-cnibin\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584750 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 
16:31:41.584823 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-host-run-k8s-cni-cncf-io\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584709 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-mcd-auth-proxy-config\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585041 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-conf-dir\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.584844 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-daemon-config\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585062 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585153 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-os-release\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585068 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-kubelet\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585249 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/5008a882-4540-4ebe-8a27-53f0de0cbd4a-multus-socket-dir-parent\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585275 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-config\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585246 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-log-socket\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585500 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.585596 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-var-lib-openvswitch\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.586228 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5008a882-4540-4ebe-8a27-53f0de0cbd4a-cni-binary-copy\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.586999 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-script-lib\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.590704 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-proxy-tls\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.591518 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e82420c5-a3ae-43ea-a208-b757794521a6-ovn-node-metrics-cert\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.624741 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptmxc\" (UniqueName: \"kubernetes.io/projected/e82420c5-a3ae-43ea-a208-b757794521a6-kube-api-access-ptmxc\") pod \"ovnkube-node-qp9h9\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.628309 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z87zh\" (UniqueName: \"kubernetes.io/projected/5008a882-4540-4ebe-8a27-53f0de0cbd4a-kube-api-access-z87zh\") pod \"multus-vlvwg\" (UID: \"5008a882-4540-4ebe-8a27-53f0de0cbd4a\") " pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.628348 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.633962 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jq6l\" (UniqueName: \"kubernetes.io/projected/a0e5bec5-c9a4-46b0-87c1-5eea75de723e-kube-api-access-7jq6l\") pod \"multus-additional-cni-plugins-vj5zz\" (UID: \"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\") " pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.643649 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctcsr\" (UniqueName: \"kubernetes.io/projected/80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a-kube-api-access-ctcsr\") pod \"machine-config-daemon-ns9m2\" (UID: \"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\") " pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc 
kubenswrapper[4995]: I0120 16:31:41.654225 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.672406 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.681478 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.681539 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.681553 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.681579 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.681595 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.682374 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.682425 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.682454 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.682482 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682517 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682546 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682558 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682577 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682606 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682633 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682650 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod 
openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682655 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682616 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:43.682599917 +0000 UTC m=+21.927204723 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682741 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:43.68271288 +0000 UTC m=+21.927317686 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682758 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:43.682751531 +0000 UTC m=+21.927356337 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.682780 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:43.682767862 +0000 UTC m=+21.927372888 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.688805 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.711208 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.730900 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.733041 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.741372 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-vlvwg" Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.745798 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80d6e58a_a0f6_4c31_b841_2c4fbaaf8f8a.slice/crio-c43df95bda6e63d17d4ddcf1016f945dde81f8f4c25c2c5fcc957fcc2f6568d1 WatchSource:0}: Error finding container c43df95bda6e63d17d4ddcf1016f945dde81f8f4c25c2c5fcc957fcc2f6568d1: Status 404 returned error can't find the container with id c43df95bda6e63d17d4ddcf1016f945dde81f8f4c25c2c5fcc957fcc2f6568d1 Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.745951 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.752370 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.758737 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.760020 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.768794 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.780880 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.783731 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.783782 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.783794 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.783865 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.783874 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.794971 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8c
cf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.809703 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.824565 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.842100 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.860944 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.873236 4995 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.873412 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl": watch of *v1.Secret ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.873847 4995 reflector.go:484] object-"openshift-machine-config-operator"/"proxy-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"proxy-tls": Unexpected watch 
close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874006 4995 reflector.go:484] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874035 4995 reflector.go:484] object-"openshift-machine-config-operator"/"kube-rbac-proxy": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-rbac-proxy": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874058 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovnkube-config": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovnkube-config": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874234 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874264 4995 reflector.go:484] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874488 4995 reflector.go:484] object-"openshift-multus"/"cni-copy-resources": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"cni-copy-resources": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874732 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874983 4995 reflector.go:484] object-"openshift-multus"/"multus-daemon-config": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"multus-daemon-config": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875121 4995 reflector.go:484] object-"openshift-multus"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875156 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert": watch of *v1.Secret ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.875038 4995 status_manager.go:875] "Failed to update status for pod" 
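[editor's note] Nearly every status patch in this section fails the same way: the pod.network-node-identity.openshift.io webhook presents a serving certificate that expired 2025-08-24T17:21:41Z while the node clock reads 2026-01-20, and Go's x509 verification rejects it ("certificate has expired or is not yet valid"). A standalone Go sketch of that validity check follows; the certificate path mirrors the /etc/webhook-cert/ mount shown in the network-node-identity pod status above, but the tls.crt filename is an assumption.

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Path assumed from the webhook-cert volume mount in the log;
        // point it at the webhook's actual serving cert to reproduce
        // the verdict seen in the patch failures.
        data, err := os.ReadFile("/etc/webhook-cert/tls.crt")
        if err != nil {
            panic(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            panic("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            panic(err)
        }
        now := time.Now()
        switch {
        case now.Before(cert.NotBefore):
            fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
                now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
        case now.After(cert.NotAfter):
            fmt.Printf("certificate has expired: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        default:
            fmt.Println("certificate is within its validity window")
        }
    }
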
pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\
\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b1
7b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ovn-kubernetes/pods/ovnkube-node-qp9h9/status\": read tcp 38.102.83.143:39894->38.102.83.143:6443: use of closed network connection" Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875188 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"env-overrides": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"env-overrides": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875153 4995 reflector.go:484] object-"openshift-multus"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875461 4995 reflector.go:484] object-"openshift-multus"/"default-dockercfg-2q5b6": watch of *v1.Secret ended with: very short watch: object-"openshift-multus"/"default-dockercfg-2q5b6": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875492 4995 reflector.go:484] object-"openshift-machine-config-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875763 4995 reflector.go:484] object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": watch of *v1.Secret ended with: very short watch: object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.875812 4995 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovnkube-script-lib": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-ovn-kubernetes"/"ovnkube-script-lib": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: W0120 16:31:41.874757 4995 reflector.go:484] object-"openshift-multus"/"default-cni-sysctl-allowlist": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-multus"/"default-cni-sysctl-allowlist": Unexpected watch close - watch lasted less than a second and no items received Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.893446 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.893482 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.893491 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.893505 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.893516 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.947594 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 01:55:21.387681798 +0000 UTC Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.988595 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.988640 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.988615 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.988725 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.988826 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:41 crc kubenswrapper[4995]: E0120 16:31:41.988891 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.994196 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.995074 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.995299 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.995324 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.995332 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.995343 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.995352 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:41Z","lastTransitionTime":"2026-01-20T16:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.996891 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.997833 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.999170 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 20 16:31:41 crc kubenswrapper[4995]: I0120 16:31:41.999927 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.000708 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.003777 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.004639 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 20 
16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.005204 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.005761 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.006572 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.006876 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.008284 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.008870 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.009464 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.010457 
4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.011154 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.012503 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.013145 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.013743 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.014812 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.015464 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.015918 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.018355 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.018820 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.019981 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.020799 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.021731 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.022068 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.022443 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.023355 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.023930 4995 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.024069 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.026446 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.027229 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.027698 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.029644 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.030906 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.031534 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.032685 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.033425 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.034394 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.035035 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.036434 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.037525 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.038042 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.038624 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.039722 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.039928 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volume
Mounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.040767 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.041947 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.042545 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.043488 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.044316 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.044969 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.045840 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.051810 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.067043 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.080522 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.091842 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.096679 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.096705 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.096713 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.096727 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.096736 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.107464 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerStarted","Data":"f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.107507 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerStarted","Data":"9b37190740bcfed9ce092b07b0a2a0bd0ae797353ea35a1e09602508d04512f1"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.109072 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.109128 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.109144 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"c43df95bda6e63d17d4ddcf1016f945dde81f8f4c25c2c5fcc957fcc2f6568d1"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.109715 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.110930 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8" exitCode=0 Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.110991 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" 
event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.111011 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"57f34d52cd1b4b5eb0d600f5f8894020ff7ca24e58b5bd1a9b2be2388078c844"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.113322 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerStarted","Data":"e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.113349 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerStarted","Data":"fe088e654dfc830ebe964867a16c62634233a4a4f5b441256d1873b2d2aefd64"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.124187 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.135033 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.147616 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.159320 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.174409 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.190769 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-o
perator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.199543 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.199574 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.199585 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.199601 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.199611 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.206191 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.219592 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.242968 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.286976 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.303700 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.303995 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.304007 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.304027 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.304038 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.328667 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.382356 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.406602 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.406627 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 
16:31:42.406635 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.406648 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.406657 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.427746 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[
{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.444424 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.456667 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.467812 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.486321 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.507534 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.508812 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.508836 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.508844 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.508857 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.508866 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.547549 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.601185 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.610975 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.611029 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.611039 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.611053 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.611063 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.626110 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.669892 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269
019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.709877 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: 
I0120 16:31:42.713251 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.713279 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.713288 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.713301 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.713310 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.737922 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.764449 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.806716 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.815075 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.815126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.815138 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.815154 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.815167 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.845402 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.889288 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.897408 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.917789 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.918029 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.918170 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.918273 4995 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.918357 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:42Z","lastTransitionTime":"2026-01-20T16:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.946121 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.948420 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 00:35:30.130468621 +0000 UTC Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.957843 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.966515 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-hqgw4"] Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.966902 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:42 crc kubenswrapper[4995]: I0120 16:31:42.996757 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.017296 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.020955 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.020996 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.021007 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.021023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.021035 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.037975 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.057533 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.077901 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.101649 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd1c7a41-f7a9-462f-b9c3-480e0715d465-host\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.101724 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chdms\" (UniqueName: \"kubernetes.io/projected/fd1c7a41-f7a9-462f-b9c3-480e0715d465-kube-api-access-chdms\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.101804 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/fd1c7a41-f7a9-462f-b9c3-480e0715d465-serviceca\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.106293 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.118169 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.118728 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0e5bec5-c9a4-46b0-87c1-5eea75de723e" containerID="e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068" exitCode=0 Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.118850 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerDied","Data":"e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.120634 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42"} Jan 
20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.122662 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.122703 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.122720 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.122739 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.122753 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.139045 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.157950 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.177743 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.197106 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.203454 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/fd1c7a41-f7a9-462f-b9c3-480e0715d465-serviceca\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.203534 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd1c7a41-f7a9-462f-b9c3-480e0715d465-host\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.203564 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chdms\" (UniqueName: \"kubernetes.io/projected/fd1c7a41-f7a9-462f-b9c3-480e0715d465-kube-api-access-chdms\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.204305 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/fd1c7a41-f7a9-462f-b9c3-480e0715d465-host\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.205546 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/fd1c7a41-f7a9-462f-b9c3-480e0715d465-serviceca\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.217936 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.225740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.225781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.225794 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.225809 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.225819 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.258134 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.277379 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.315138 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chdms\" (UniqueName: \"kubernetes.io/projected/fd1c7a41-f7a9-462f-b9c3-480e0715d465-kube-api-access-chdms\") pod \"node-ca-hqgw4\" (UID: \"fd1c7a41-f7a9-462f-b9c3-480e0715d465\") " pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.330259 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.330460 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.330507 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.330520 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.330546 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.330559 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.337564 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.357455 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.408480 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.419710 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.432624 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.432655 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.432664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.432676 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" 
Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.432685 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.458172 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.477425 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.498252 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.526620 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-ce
rt/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.534914 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.534955 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.534965 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.534981 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.534993 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.537674 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.590092 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\
\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.607382 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.607596 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:31:47.607573289 +0000 UTC m=+25.852178085 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.612788 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-hqgw4" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.624817 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: W0120 16:31:43.625242 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd1c7a41_f7a9_462f_b9c3_480e0715d465.slice/crio-ae587c10b9cd77747af95ff1079aad3b26807c9865d574b31c5e8999686705c2 WatchSource:0}: Error finding container ae587c10b9cd77747af95ff1079aad3b26807c9865d574b31c5e8999686705c2: Status 404 returned error can't find the container with id ae587c10b9cd77747af95ff1079aad3b26807c9865d574b31c5e8999686705c2 Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.637253 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.637288 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.637299 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.637314 4995 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.637326 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.667973 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.706967 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.708247 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.708288 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.708314 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.708338 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708407 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708451 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:47.708437722 +0000 UTC m=+25.953042528 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708538 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708569 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708569 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708620 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708641 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708582 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708725 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:47.708698479 +0000 UTC m=+25.953303315 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708536 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708754 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:47.7087423 +0000 UTC m=+25.953347136 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.708806 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:47.708794282 +0000 UTC m=+25.953399218 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.739475 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.739524 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.739535 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.739553 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.739572 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.746949 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.784209 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.831260 
4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.841617 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.841659 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.841673 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.841693 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.841708 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.863118 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.907882 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"
tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"
readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.944190 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.944695 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.944715 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.944738 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.944757 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:43Z","lastTransitionTime":"2026-01-20T16:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.949479 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 10:48:53.396362441 +0000 UTC Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.951505 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.989298 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.989347 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.989426 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.989551 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.989702 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:43 crc kubenswrapper[4995]: E0120 16:31:43.989864 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:43 crc kubenswrapper[4995]: I0120 16:31:43.991785 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:43Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.028411 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.047315 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.047364 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.047374 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.047401 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.047410 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.067986 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.121140 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.125718 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0e5bec5-c9a4-46b0-87c1-5eea75de723e" containerID="7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64" exitCode=0 Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.125787 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerDied","Data":"7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.131837 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.131891 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.131908 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.131922 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.131936 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" 
event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.131952 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.133737 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-hqgw4" event={"ID":"fd1c7a41-f7a9-462f-b9c3-480e0715d465","Type":"ContainerStarted","Data":"9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.133775 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-hqgw4" event={"ID":"fd1c7a41-f7a9-462f-b9c3-480e0715d465","Type":"ContainerStarted","Data":"ae587c10b9cd77747af95ff1079aad3b26807c9865d574b31c5e8999686705c2"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.150309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.150363 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.150378 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.150399 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.150414 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.156923 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a
05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.187943 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.202007 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.204770 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.225902 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.243366 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.253198 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.253233 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.253244 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.253259 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.253271 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.285154 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.329910 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.355683 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.355722 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.355730 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.355745 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.355753 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.364329 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.406297 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\
":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,
\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.451199 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.458812 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.458857 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.458871 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.458890 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.458902 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.490183 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.533203 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.561650 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.561695 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.561710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.561731 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.561746 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.571766 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.608276 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.648006 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.664552 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.664597 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.664612 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.664632 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.664647 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.707565 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a
599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.744270 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.767665 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.767713 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.767729 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.767761 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.767776 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.772471 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.807935 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.847618 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc
-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.870023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.870061 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.870076 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.870108 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.870119 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.885462 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.934946 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.950390 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 16:19:00.001413844 +0000 UTC Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.964612 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:44Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.972177 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.972207 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.972224 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.972248 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:44 crc kubenswrapper[4995]: I0120 16:31:44.972260 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:44Z","lastTransitionTime":"2026-01-20T16:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.005284 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.045847 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.074859 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.074924 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.074945 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.074974 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.074997 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.089451 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.128511 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.138025 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0e5bec5-c9a4-46b0-87c1-5eea75de723e" containerID="a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188" exitCode=0 Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.138098 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerDied","Data":"a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.167709 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.178061 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.178102 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.178111 4995 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.178124 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.178133 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.213913 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.247557 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.281404 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.281444 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.281457 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.281474 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.281485 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.312493 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.341950 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.369695 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.383560 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.383596 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.383606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.383623 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.383634 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.406738 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.445070 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.484441 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.485805 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.485912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.485980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.485999 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.486009 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.525578 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.566567 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.587492 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.587525 4995 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.587536 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.587551 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.587572 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.620146 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\
\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.647490 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.689565 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.689604 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.689615 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.689630 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.689641 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.692558 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.739613 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.767497 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.792407 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.792445 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.792455 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.792469 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.792478 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.805296 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.848212 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.887607 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:45Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.894436 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.894473 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.894481 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.894494 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.894504 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.951154 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 12:07:43.12916243 +0000 UTC Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.988672 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.988737 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:45 crc kubenswrapper[4995]: E0120 16:31:45.988786 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:45 crc kubenswrapper[4995]: E0120 16:31:45.988837 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.989135 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:45 crc kubenswrapper[4995]: E0120 16:31:45.989373 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.996492 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.996533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.996550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.996571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:45 crc kubenswrapper[4995]: I0120 16:31:45.996588 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:45Z","lastTransitionTime":"2026-01-20T16:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.099982 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.100669 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.100710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.100731 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.100746 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.146719 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.149548 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0e5bec5-c9a4-46b0-87c1-5eea75de723e" containerID="03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4" exitCode=0 Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.149596 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerDied","Data":"03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.175715 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.188738 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.200420 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.203094 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.203131 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.203145 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.203164 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.203176 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.217710 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.230773 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.251279 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.272395 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.286058 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.302682 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.307474 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.307522 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.307534 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.307553 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.307566 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.317208 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.332171 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.366335 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.408628 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.410690 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.410727 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.410736 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.410752 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.410761 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.446104 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.493905 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:46Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.518737 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.518783 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.518793 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.518810 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.518822 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.622740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.622777 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.622787 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.622799 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.622808 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.726500 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.726586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.726614 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.726648 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.726672 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.830254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.830297 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.830307 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.830327 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.830338 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.933023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.933056 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.933065 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.933101 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.933110 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:46Z","lastTransitionTime":"2026-01-20T16:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:46 crc kubenswrapper[4995]: I0120 16:31:46.951466 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 00:17:09.421136497 +0000 UTC Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.035713 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.035752 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.035766 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.035788 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.035803 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.138765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.138817 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.138833 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.138856 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.138872 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.157109 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0e5bec5-c9a4-46b0-87c1-5eea75de723e" containerID="e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1" exitCode=0 Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.157176 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerDied","Data":"e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.182643 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.200612 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.211404 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.224450 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.238295 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.240874 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.240926 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.240945 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.240970 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.240988 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.253545 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.267415 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.285948 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.298498 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.308780 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.326821 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z 
is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.338008 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.343104 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.343141 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.343150 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.343167 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.343181 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.350473 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.362084 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.374466 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:47Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.445877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.445916 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.445927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.445941 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.445978 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.548446 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.548490 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.548503 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.548520 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.548531 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.649647 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.649821 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:31:55.649802529 +0000 UTC m=+33.894407335 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.650912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.650932 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.650940 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.650953 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.650961 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.751071 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.751187 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.751229 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.751290 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751346 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751359 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751377 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751395 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751410 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751484 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751436 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:55.751415183 +0000 UTC m=+33.996020009 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751512 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751538 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751557 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:55.751525446 +0000 UTC m=+33.996130312 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751576 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:55.751567567 +0000 UTC m=+33.996172383 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.751648 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:55.751625359 +0000 UTC m=+33.996230205 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.753094 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.753157 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.753177 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.753200 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.753215 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.855217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.855661 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.855677 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.855700 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.855715 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.951857 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 09:21:30.925724002 +0000 UTC Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.958985 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.959032 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.959052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.959119 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.959140 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:47Z","lastTransitionTime":"2026-01-20T16:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.988659 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.988742 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:47 crc kubenswrapper[4995]: I0120 16:31:47.988685 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.988864 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.989000 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:47 crc kubenswrapper[4995]: E0120 16:31:47.989131 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.062113 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.062160 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.062178 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.062201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.062219 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163508 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0e5bec5-c9a4-46b0-87c1-5eea75de723e" containerID="94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21" exitCode=0 Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163559 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163582 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163593 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163560 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerDied","Data":"94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21"} Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163610 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.163622 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.177339 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.193255 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.208734 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.219829 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.237102 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.255342 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z 
is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.265807 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.265834 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.265842 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.265856 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.265867 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.269298 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: 
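
Every "failed calling webhook" record above reduces to the same x509 validity check: the serving certificate presented by the webhook at 127.0.0.1:9743 has a NotAfter of 2025-08-24T17:21:41Z, while the node clock reads 2026-01-20, so the TLS handshake is rejected before the status patch is ever sent. A minimal Go sketch of that check, assuming a hypothetical local PEM copy named webhook-cert.pem (on a live node the certificate arrives in the handshake itself):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// "webhook-cert.pem" is a hypothetical local copy of the webhook's
	// serving certificate, used here only to illustrate the check.
	data, err := os.ReadFile("webhook-cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now().UTC()
	switch {
	case now.After(cert.NotAfter):
		// The condition behind "x509: certificate has expired or is not
		// yet valid: current time ... is after ...".
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid: current time %s is before %s\n",
			now.Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
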
I0120 16:31:48.280884 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.291229 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.302622 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.314420 4995 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.326982 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
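
Each patch body the status manager fails to send is a strategic-merge patch against pod status: the "$setElementOrder/conditions" directive pins the ordering of the conditions list, and the "conditions" entries are then merged by their "type" key. A stdlib-only Go sketch that builds a patch of the same shape; the uid and condition values are copied from the iptables-alerter-4ln5h record above, and nothing here actually talks to an apiserver:

package main

import (
	"encoding/json"
	"fmt"
)

// condRef mirrors the elements of "$setElementOrder/conditions": order-only
// references keyed by the merge key "type".
type condRef struct {
	Type string `json:"type"`
}

// condition mirrors the merged entries in "conditions".
type condition struct {
	Type               string `json:"type"`
	Status             string `json:"status,omitempty"`
	LastTransitionTime string `json:"lastTransitionTime,omitempty"`
}

func main() {
	patch := map[string]any{
		// uid copied from the iptables-alerter-4ln5h record above.
		"metadata": map[string]any{"uid": "d75a4c96-2883-4a0b-bab2-0fab2b6c0b49"},
		"status": map[string]any{
			"$setElementOrder/conditions": []condRef{
				{Type: "PodReadyToStartContainers"}, {Type: "Initialized"},
				{Type: "Ready"}, {Type: "ContainersReady"}, {Type: "PodScheduled"},
			},
			"conditions": []condition{
				{Type: "Ready", Status: "True", LastTransitionTime: "2026-01-20T16:31:43Z"},
			},
		},
	}
	out, err := json.MarshalIndent(patch, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
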
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.339614 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.354059 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f
0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.367180 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:48Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.368276 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.368320 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.368331 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.368349 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.368360 4995 setters.go:603] "Node became not ready" node="crc" 
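
The recurring NodeNotReady flips interleaved with these patch failures come from the runtime network check: the kubelet reports NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/, which normally happens once ovnkube-controller (still PodInitializing above) writes its config. A rough Go sketch of that directory scan; the extensions checked are the conventional CNI ones and are an approximation of libcni's discovery, not the kubelet's exact list:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the NotReady message above.
	confDir := "/etc/kubernetes/cni/net.d"
	var found []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pat))
		if err != nil {
			continue
		}
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Printf("NetworkReady=false: no CNI configuration file in %s\n", confDir)
		os.Exit(1)
	}
	fmt.Println("CNI config present:", found)
}
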
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.471786 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.471826 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.471837 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.471855 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.471866 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.574379 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.574453 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.574475 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.574504 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.574523 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.676701 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.676750 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.676767 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.676789 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.676802 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.780355 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.780427 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.780454 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.780484 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.780507 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.883526 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.883579 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.883592 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.883609 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.883620 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.952317 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 06:54:04.494173873 +0000 UTC
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.986122 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.986183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.986205 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.986229 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:48 crc kubenswrapper[4995]: I0120 16:31:48.986246 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:48Z","lastTransitionTime":"2026-01-20T16:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.093685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.093761 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.093780 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.093806 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.093824 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.171412 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" event={"ID":"a0e5bec5-c9a4-46b0-87c1-5eea75de723e","Type":"ContainerStarted","Data":"a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c"}
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.177308 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071"}
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.177688 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.177729 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.185798 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.197704 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.197764 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.197784 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.197815 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.197838 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"contain
erID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.204292 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.211431 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.221832 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.221832 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.253712 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.266353 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.283360 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.294914 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.300217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.300268 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.300284 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc 
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.300307 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.300325 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.324457 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.356526 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z 
is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.370000 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.380836 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.390606 4995 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.401072 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.402242 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.402279 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.402291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.402428 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.402441 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.412297 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.422338 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.433516 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.443546 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.457830 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.488238 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.504649 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.504711 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.504724 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.504740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.504752 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.505013 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.518709 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.528962 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc
-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.538953 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 
2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.558744 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7
c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPa
th\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.568570 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.578384 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.587911 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.597149 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:49Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.606254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.606300 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.606317 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.606337 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.606353 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.708897 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.708965 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.708976 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.708991 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.709003 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.812323 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.812362 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.812371 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.812387 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.812397 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.915160 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.915210 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.915221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.915236 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.915247 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:49Z","lastTransitionTime":"2026-01-20T16:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.952783 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 16:09:53.281895966 +0000 UTC Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.989563 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:49 crc kubenswrapper[4995]: E0120 16:31:49.989708 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.989564 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:49 crc kubenswrapper[4995]: I0120 16:31:49.989588 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:49 crc kubenswrapper[4995]: E0120 16:31:49.989965 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:49 crc kubenswrapper[4995]: E0120 16:31:49.990050 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.017889 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.017931 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.017942 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.017963 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.017973 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.120239 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.120273 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.120284 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.120303 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.120315 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.180126 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.222147 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.222179 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.222187 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.222202 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.222213 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.324352 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.324405 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.324415 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.324432 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.324442 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.349671 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.349708 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.349717 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.349732 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.349743 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: E0120 16:31:50.360733 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:50Z is after 
2025-08-24T17:21:41Z" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.365385 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.365420 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.365430 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.365446 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.365456 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: E0120 16:31:50.400377 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:50Z is after 
2025-08-24T17:21:41Z" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.407034 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.407070 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.407096 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.407112 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.407123 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: E0120 16:31:50.427547 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:50Z is after 
2025-08-24T17:21:41Z" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.430989 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.431024 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.431035 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.431052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.431064 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: E0120 16:31:50.444574 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:50Z is after 
2025-08-24T17:21:41Z" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.447908 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.447944 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.447953 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.447970 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.447980 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:50 crc kubenswrapper[4995]: E0120 16:31:50.466240 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:50Z is after 
2025-08-24T17:21:41Z"
Jan 20 16:31:50 crc kubenswrapper[4995]: E0120 16:31:50.466710 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.468641 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.468657 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.468664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.468677 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.468686 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.571126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.571163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.571174 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.571189 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.571198 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.674333 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.674381 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.674392 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.674409 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.674421 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.776291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.776342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.776353 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.776369 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.776380 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.878809 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.878904 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.878929 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.878954 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.878972 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.952923 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 01:59:41.331381152 +0000 UTC
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.981662 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.981709 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.981726 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.981747 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:50 crc kubenswrapper[4995]: I0120 16:31:50.981764 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:50Z","lastTransitionTime":"2026-01-20T16:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.084860 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.084899 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.084908 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.084923 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.084933 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.184734 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/0.log"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.186004 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.186039 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.186050 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.186067 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.186097 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.188594 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071" exitCode=1
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.188631 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.189347 4995 scope.go:117] "RemoveContainer" containerID="dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.215269 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.231470 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.247863 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.260367 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.272184 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.287992 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.288029 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.288039 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:51 crc 
kubenswrapper[4995]: I0120 16:31:51.288053 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.288063 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.291070 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17f
fca2bf220fcec631af9bc071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI0120 16:31:50.588381 6299 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 16:31:50.588425 6299 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 16:31:50.588436 6299 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 16:31:50.588453 6299 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 16:31:50.588480 6299 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 16:31:50.588500 6299 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 16:31:50.588505 6299 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 16:31:50.588517 6299 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 16:31:50.588561 6299 factory.go:656] Stopping watch factory\\\\nI0120 16:31:50.588563 6299 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 16:31:50.588565 6299 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 16:31:50.588573 6299 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 16:31:50.588580 6299 ovnkube.go:599] Stopped ovnkube\\\\nI0120 
16\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.309073 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.321652 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.334426 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.345167 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.358311 4995 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.368929 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.381723 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.389894 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.389932 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.389940 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.389953 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.389961 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.391618 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.405683 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.495732 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.495769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:51 crc 
kubenswrapper[4995]: I0120 16:31:51.495778 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.495792 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.495803 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.597626 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.597658 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.597669 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.597685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.597695 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.700116 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.700155 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.700167 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.700184 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.700197 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.802689 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.802743 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.802752 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.802767 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.802778 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.905682 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.905726 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.905741 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.905765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.905779 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:51Z","lastTransitionTime":"2026-01-20T16:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.953541 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 12:03:04.399084687 +0000 UTC
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.989226 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.989289 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:31:51 crc kubenswrapper[4995]: I0120 16:31:51.989265 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:31:51 crc kubenswrapper[4995]: E0120 16:31:51.989474 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:31:51 crc kubenswrapper[4995]: E0120 16:31:51.989629 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:31:51 crc kubenswrapper[4995]: E0120 16:31:51.989867 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.007954 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.008012 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.008023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.008044 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.008060 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.008930 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.028881 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.042153 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc
-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.053151 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 
2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.078775 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/
etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026
-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.096377 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17f
fca2bf220fcec631af9bc071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI0120 16:31:50.588381 6299 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 16:31:50.588425 6299 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 16:31:50.588436 6299 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 16:31:50.588453 6299 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 16:31:50.588480 6299 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 16:31:50.588500 6299 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 16:31:50.588505 6299 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 16:31:50.588517 6299 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 16:31:50.588561 6299 factory.go:656] Stopping watch factory\\\\nI0120 16:31:50.588563 6299 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 16:31:50.588565 6299 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 16:31:50.588573 6299 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 16:31:50.588580 6299 ovnkube.go:599] Stopped ovnkube\\\\nI0120 
16\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.109610 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.110165 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.110199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.110238 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.110256 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.110269 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.123407 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.138641 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.157768 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.175505 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.196674 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/0.log" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.199159 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.201626 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.201776 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.213011 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.213114 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.213141 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.213170 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.213193 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.218514 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.241176 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.266777 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.290536 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.308679 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.316277 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.316357 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.316382 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.316414 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.316434 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.328265 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.343416 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.364527 4995 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.377544 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.385773 4995 status_manager.go:875] "Failed to update status for pod"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.398388 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.418262 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.418300 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.418312 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.418349 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.418362 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.422360 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b8806
9cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.440964 4995 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.457443 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.473757 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.487588 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.498961 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.521441 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.521490 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.521499 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.521514 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.521523 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.523069 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"v
olumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,
\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.541448 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI0120 16:31:50.588381 6299 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 16:31:50.588425 6299 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 16:31:50.588436 6299 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 16:31:50.588453 6299 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 16:31:50.588480 6299 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 16:31:50.588500 6299 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 16:31:50.588505 6299 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 16:31:50.588517 6299 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 16:31:50.588561 6299 factory.go:656] Stopping watch factory\\\\nI0120 16:31:50.588563 6299 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 16:31:50.588565 6299 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 16:31:50.588573 6299 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 16:31:50.588580 6299 ovnkube.go:599] Stopped ovnkube\\\\nI0120 
16\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.624594 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.624878 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.624954 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.625025 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.625107 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.727444 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.727492 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.727701 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.727721 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.727734 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.829684 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.829928 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.830019 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.830113 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.830178 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.933453 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.933495 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.933503 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.933519 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.933530 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:52Z","lastTransitionTime":"2026-01-20T16:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:52 crc kubenswrapper[4995]: I0120 16:31:52.954066 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 14:42:10.554431089 +0000 UTC Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.036718 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.036763 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.036774 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.036796 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.036808 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.140156 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.140227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.140250 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.140273 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.140336 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.207937 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/1.log" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.208571 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/0.log" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.212381 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd" exitCode=1 Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.212440 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.212485 4995 scope.go:117] "RemoveContainer" containerID="dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.213674 4995 scope.go:117] "RemoveContainer" containerID="d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd" Jan 20 16:31:53 crc kubenswrapper[4995]: E0120 16:31:53.213998 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.230164 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.242859 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.242902 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.242911 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.242927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.242937 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.243265 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.255355 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.265652 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.277891 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.287192 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.308632 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.323993 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.337352 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.346232 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.346291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.346312 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.346337 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.346363 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.355766 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.373576 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI0120 16:31:50.588381 6299 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 16:31:50.588425 6299 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 16:31:50.588436 6299 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 16:31:50.588453 6299 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 16:31:50.588480 6299 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 16:31:50.588500 6299 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 16:31:50.588505 6299 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 16:31:50.588517 6299 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 16:31:50.588561 6299 factory.go:656] Stopping watch factory\\\\nI0120 16:31:50.588563 6299 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 16:31:50.588565 6299 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 16:31:50.588573 6299 handler.go:208] Removed *v1.Namespace 
event handler 5\\\\nI0120 16:31:50.588580 6299 ovnkube.go:599] Stopped ovnkube\\\\nI0120 16\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e 
UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.391420 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.406989 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.418059 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.432660 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.448532 4995 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.448570 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.448578 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.448591 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.448600 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.551851 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.552243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.552451 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.552605 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.552757 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.655342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.655389 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.655399 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.655414 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.655427 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.758508 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.758575 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.758591 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.758613 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.758634 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.801956 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2"] Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.802383 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.803919 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.806117 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.823903 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.835281 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.847762 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.860851 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.860884 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.860895 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.860910 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.860946 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.862541 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.874301 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.896604 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://dfb626873caea72409c534ea7ef80ecc0b38d17ffca2bf220fcec631af9bc071\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:50Z\\\",\\\"message\\\":\\\"/pkg/client/informers/externalversions/factory.go:141\\\\nI0120 16:31:50.588381 6299 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 16:31:50.588425 6299 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 16:31:50.588436 6299 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 16:31:50.588453 6299 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 16:31:50.588480 6299 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 16:31:50.588500 6299 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 16:31:50.588505 6299 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 16:31:50.588517 6299 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 16:31:50.588545 6299 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 16:31:50.588561 6299 factory.go:656] Stopping watch factory\\\\nI0120 16:31:50.588563 6299 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 16:31:50.588565 6299 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 16:31:50.588573 6299 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 16:31:50.588580 6299 ovnkube.go:599] Stopped ovnkube\\\\nI0120 16\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e 
Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.910340 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.920156 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b40e213e-290e-403c-a77a-065638455b73-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.920220 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b40e213e-290e-403c-a77a-065638455b73-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.920273 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnhpk\" (UniqueName: \"kubernetes.io/projected/b40e213e-290e-403c-a77a-065638455b73-kube-api-access-tnhpk\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.920313 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b40e213e-290e-403c-a77a-065638455b73-env-overrides\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.926352 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.938815 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.948788 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.954713 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 22:50:34.96073382 +0000 UTC Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.963398 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.963429 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.963441 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.963456 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.963468 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:53Z","lastTransitionTime":"2026-01-20T16:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.967351 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.983901 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:53Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.988983 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.989064 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:53 crc kubenswrapper[4995]: I0120 16:31:53.989172 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:53 crc kubenswrapper[4995]: E0120 16:31:53.989120 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:53 crc kubenswrapper[4995]: E0120 16:31:53.989282 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:53 crc kubenswrapper[4995]: E0120 16:31:53.989386 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.002468 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.014069 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.021017 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b40e213e-290e-403c-a77a-065638455b73-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.021070 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b40e213e-290e-403c-a77a-065638455b73-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.021152 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnhpk\" (UniqueName: \"kubernetes.io/projected/b40e213e-290e-403c-a77a-065638455b73-kube-api-access-tnhpk\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.021192 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b40e213e-290e-403c-a77a-065638455b73-env-overrides\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.021914 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b40e213e-290e-403c-a77a-065638455b73-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.022064 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b40e213e-290e-403c-a77a-065638455b73-env-overrides\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.028309 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.028534 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b40e213e-290e-403c-a77a-065638455b73-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.046778 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.049342 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnhpk\" (UniqueName: \"kubernetes.io/projected/b40e213e-290e-403c-a77a-065638455b73-kube-api-access-tnhpk\") pod \"ovnkube-control-plane-749d76644c-75tq2\" (UID: \"b40e213e-290e-403c-a77a-065638455b73\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.065901 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.065952 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.065966 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.065986 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.066002 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.116900 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" Jan 20 16:31:54 crc kubenswrapper[4995]: W0120 16:31:54.135447 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb40e213e_290e_403c_a77a_065638455b73.slice/crio-97e7eb532aff4eaafc44158df077d4fbe26cfbf89e6b321935c69f2e089571c3 WatchSource:0}: Error finding container 97e7eb532aff4eaafc44158df077d4fbe26cfbf89e6b321935c69f2e089571c3: Status 404 returned error can't find the container with id 97e7eb532aff4eaafc44158df077d4fbe26cfbf89e6b321935c69f2e089571c3 Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.168978 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.169031 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.169043 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.169063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.169098 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.217379 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/1.log" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.226897 4995 scope.go:117] "RemoveContainer" containerID="d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd" Jan 20 16:31:54 crc kubenswrapper[4995]: E0120 16:31:54.227042 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.228361 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" event={"ID":"b40e213e-290e-403c-a77a-065638455b73","Type":"ContainerStarted","Data":"97e7eb532aff4eaafc44158df077d4fbe26cfbf89e6b321935c69f2e089571c3"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.239648 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 
crc kubenswrapper[4995]: I0120 16:31:54.250521 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.262146 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.270928 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.271051 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.271165 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.271259 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.271368 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.273596 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.286450 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.299845 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.309259 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.321417 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.332857 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.350414 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.362362 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.374336 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.374366 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.374375 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.374390 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.374401 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.380829 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.399634 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.410375 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.428186 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.444207 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba
1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.477098 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.477140 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.477150 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.477170 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.477182 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.579808 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.579884 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.579907 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.579936 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.579959 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.682239 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.682283 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.682294 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.682311 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.682322 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.784446 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.784477 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.784485 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.784498 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.784507 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.887109 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.887165 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.887180 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.887201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.887216 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.944874 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-kbdtf"] Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.945674 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:54 crc kubenswrapper[4995]: E0120 16:31:54.945768 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.954822 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 15:11:46.994148515 +0000 UTC Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.956730 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.977097 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.990070 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.990158 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:54 crc 
kubenswrapper[4995]: I0120 16:31:54.990178 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.990204 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.990225 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:54Z","lastTransitionTime":"2026-01-20T16:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:54 crc kubenswrapper[4995]: I0120 16:31:54.992022 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:54Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.009220 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.023343 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 
builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.032548 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f24f7\" (UniqueName: \"kubernetes.io/projected/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-kube-api-access-f24f7\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.032731 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:55 
crc kubenswrapper[4995]: I0120 16:31:55.040345 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.063259 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.075157 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.089660 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.092733 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.092781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.092796 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.092824 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.092841 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.103622 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.126292 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o
://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd6575
42731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.133797 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.133891 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f24f7\" (UniqueName: \"kubernetes.io/projected/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-kube-api-access-f24f7\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.134125 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.134244 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs 
podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:55.634213174 +0000 UTC m=+33.878818020 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.143622 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operato
r@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.163239 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f24f7\" (UniqueName: \"kubernetes.io/projected/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-kube-api-access-f24f7\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.173283 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.189980 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.195020 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.195051 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.195060 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.195072 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.195097 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.203738 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.214879 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.226229 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.233472 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" event={"ID":"b40e213e-290e-403c-a77a-065638455b73","Type":"ContainerStarted","Data":"a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7"} Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.233539 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" event={"ID":"b40e213e-290e-403c-a77a-065638455b73","Type":"ContainerStarted","Data":"bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816"} Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.247962 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.260900 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.273999 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.285423 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.297736 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.297802 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.297820 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.297847 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.297866 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.299627 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.314482 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.344339 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.374949 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.391396 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.400107 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.400136 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:55 crc 
kubenswrapper[4995]: I0120 16:31:55.400145 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.400158 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.400168 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.403458 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:3
1:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.414138 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.432346 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.443254 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.453491 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.464726 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.473650 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.488647 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:55Z is after 2025-08-24T17:21:41Z"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.502178 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.502210 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.502220 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.502235 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.502245 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.605148 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.605182 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.605227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.605244 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.605258 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.639442 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.639706 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.639810 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:56.63978634 +0000 UTC m=+34.884391156 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.707739 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.707818 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.707830 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.707844 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.707854 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.740268 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.740480 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:32:11.740462068 +0000 UTC m=+49.985066874 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.810669 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.810754 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.810769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.810798 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.810815 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.841302 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.841387 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.841419 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.841446 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841514 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841575 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841588 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841595 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841623 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:11.841601568 +0000 UTC m=+50.086206374 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841634 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841644 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:11.841635579 +0000 UTC m=+50.086240475 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841655 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841612 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841706 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841720 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:11.84169768 +0000 UTC m=+50.086302526 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.841778 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:11.841766503 +0000 UTC m=+50.086371339 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.913591 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.913634 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.913647 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.913666 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.913681 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:55Z","lastTransitionTime":"2026-01-20T16:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.955254 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 03:07:33.945248232 +0000 UTC
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.988995 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.989042 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:31:55 crc kubenswrapper[4995]: I0120 16:31:55.989166 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.989187 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.989269 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:31:55 crc kubenswrapper[4995]: E0120 16:31:55.990216 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.016303 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.016339 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.016347 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.016360 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.016369 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.119268 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.119309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.119319 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.119333 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.119344 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.222267 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.222317 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.222335 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.222359 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.222377 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.324853 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.324914 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.324926 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.324942 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.324952 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.428362 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.428426 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.428443 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.428468 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.428486 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.531557 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.531586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.531595 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.531606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.531615 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.635226 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.635294 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.635314 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.635339 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.635359 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.651301 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:31:56 crc kubenswrapper[4995]: E0120 16:31:56.651536 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 16:31:56 crc kubenswrapper[4995]: E0120 16:31:56.651638 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:31:58.651618824 +0000 UTC m=+36.896223630 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.738500 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.738536 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.738544 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.738559 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.738568 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.842443 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.842536 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.842557 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.842586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.842609 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.945521 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.945561 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.945571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.945586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.945597 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:56Z","lastTransitionTime":"2026-01-20T16:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.956036 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 07:34:20.063735136 +0000 UTC
Jan 20 16:31:56 crc kubenswrapper[4995]: I0120 16:31:56.988603 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:31:56 crc kubenswrapper[4995]: E0120 16:31:56.988836 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.048071 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.048136 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.048148 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.048164 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.048176 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.151830 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.151891 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.151927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.151955 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.151976 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.254370 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.254431 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.254450 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.254476 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.254497 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.357316 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.357402 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.357443 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.357473 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.357502 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.460094 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.460151 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.460163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.460179 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.460190 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.562820 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.562903 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.562927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.562959 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.562982 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.665824 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.665865 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.665874 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.665887 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.665895 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.768843 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.768900 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.768919 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.768935 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.768948 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.872245 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.872323 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.872346 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.872377 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.872400 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
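
The repeated "Node became not ready" entries are the kubelet re-asserting the same Ready=False condition on each status pass while no CNI config file exists under /etc/kubernetes/cni/net.d/; the condition can only clear once the network plugin writes its configuration there. The same condition can be read back from the API, sketched here with the kubernetes Python client (an assumption; nothing in this log uses that client):

```python
from kubernetes import client, config

# Read back the node Ready condition that the kubelet is setting in the
# entries above. Node name "crc" is taken from the log; the kubeconfig is
# whatever the default loader finds.
config.load_kube_config()
v1 = client.CoreV1Api()

node = v1.read_node("crc")
for cond in node.status.conditions:
    if cond.type == "Ready":
        print(cond.status, cond.reason, cond.message)
        # Expected while CNI config is missing:
        # False KubeletNotReady container runtime network not ready: ...
```
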
Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.956508 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 22:28:57.017982108 +0000 UTC Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.974199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.974236 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.974247 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.974261 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.974274 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:57Z","lastTransitionTime":"2026-01-20T16:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.988984 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:57 crc kubenswrapper[4995]: E0120 16:31:57.989106 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.989136 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:57 crc kubenswrapper[4995]: I0120 16:31:57.989178 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:57 crc kubenswrapper[4995]: E0120 16:31:57.989212 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:57 crc kubenswrapper[4995]: E0120 16:31:57.989373 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
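
The certificate_manager entries (rotation deadline 2025-12-02 in the previous pass, 2025-11-18 here) show the kubelet recomputing a jittered rotation deadline for its serving certificate on each pass; both deadlines are already behind the log's 2026-01-20 clock, so rotation is due immediately. A rough model of such a jittered deadline as a random fraction of the certificate lifetime; the 70-90% window and the issue time below are assumptions for illustration, only the expiry comes from this log:

```python
import random
from datetime import datetime, timezone

# Jittered rotation deadline, loosely modeled on the kubelet's certificate
# manager: rotate somewhere in the 70-90% span of the cert's lifetime.
# The 0.7-0.9 window is an assumption for illustration.
def rotation_deadline(not_before, not_after):
    lifetime = not_after - not_before
    return not_before + lifetime * (0.7 + 0.2 * random.random())

not_before = datetime(2025, 2, 24, 5, 53, 3, tzinfo=timezone.utc)  # assumed issue time
not_after = datetime(2026, 2, 24, 5, 53, 3, tzinfo=timezone.utc)   # expiry from the log
print(f"rotation deadline is {rotation_deadline(not_before, not_after)}")
# Lands in Nov-Dec 2025, consistent with the two deadlines logged above.
```
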
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.076426 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.076472 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.076483 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.076501 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.076513 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.178788 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.178819 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.178828 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.178841 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.178849 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.280696 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.280759 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.280777 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.280799 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.280816 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.383578 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.383676 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.383710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.383739 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.383765 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.485877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.485981 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.486003 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.486046 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.486066 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.588469 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.588547 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.588569 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.588594 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.588615 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.672410 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:58 crc kubenswrapper[4995]: E0120 16:31:58.672645 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:31:58 crc kubenswrapper[4995]: E0120 16:31:58.672781 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:02.672746168 +0000 UTC m=+40.917351014 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.690830 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.690923 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.690946 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.690970 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.690987 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.793825 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.793915 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.793933 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.793956 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.793972 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.814629 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.831963 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.848287 
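
This is the first of several status-patch failures in this section with an identical signature: the status manager PATCHes pod status, the API server calls the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and the TLS handshake is rejected because the webhook's serving certificate expired on 2025-08-24, months before the log's clock. The same validity check on a PEM certificate, sketched with the cryptography package (the file path is a placeholder, not the webhook's actual cert location):

```python
from datetime import datetime, timezone
from cryptography import x509

# Check a serving certificate's validity window the way the TLS client in
# the log does. The path is a placeholder for illustration.
pem = open("/path/to/webhook-serving.crt", "rb").read()
cert = x509.load_pem_x509_certificate(pem)

now = datetime.now(timezone.utc)
not_after = cert.not_valid_after_utc  # .not_valid_after on older cryptography releases
if now > not_after:
    # Matches the log: "current time ... is after 2025-08-24T17:21:41Z"
    print(f"certificate has expired: current time {now:%Y-%m-%dT%H:%M:%SZ} "
          f"is after {not_after:%Y-%m-%dT%H:%M:%SZ}")
```
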
4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264
a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.865657 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.882101 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.894754 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.896575 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.896633 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.896651 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.896677 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.896695 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.914845 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b8806
9cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.928032 4995 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.957525 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 09:39:21.509689445 +0000 UTC Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.960856 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877
441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.978466 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.988948 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:31:58 crc kubenswrapper[4995]: E0120 16:31:58.989054 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.994656 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:58Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.998904 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.998984 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.999000 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.999037 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:58 crc kubenswrapper[4995]: I0120 16:31:58.999053 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:58Z","lastTransitionTime":"2026-01-20T16:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.010857 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\
\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.022439 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.043896 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.055812 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.070546 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.083062 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.095355 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:59Z is after 2025-08-24T17:21:41Z" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.101009 4995 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.101038 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.101047 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.101061 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.101069 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.203698 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.203750 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.203767 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.203790 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.203807 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.309163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.309205 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.309221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.309236 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.309248 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.412597 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.412693 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.412711 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.412733 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.412752 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.515792 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.515855 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.515875 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.515901 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.515919 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.619511 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.619638 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.619664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.619692 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.619715 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.722985 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.723102 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.723120 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.723143 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.723160 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.826407 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.826490 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.826522 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.826550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.826571 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.928921 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.928985 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.928998 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.929016 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.929029 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:31:59Z","lastTransitionTime":"2026-01-20T16:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.958296 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 12:46:37.910084999 +0000 UTC Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.988970 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.989049 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:31:59 crc kubenswrapper[4995]: I0120 16:31:59.989261 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:31:59 crc kubenswrapper[4995]: E0120 16:31:59.989258 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:31:59 crc kubenswrapper[4995]: E0120 16:31:59.989328 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:31:59 crc kubenswrapper[4995]: E0120 16:31:59.989376 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.031658 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.031727 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.031743 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.031767 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.031785 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.134603 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.134669 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.134686 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.134712 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.134730 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.237957 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.238009 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.238029 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.238053 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.238070 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.340877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.340923 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.340931 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.340945 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.340956 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.442905 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.442964 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.442980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.443000 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.443014 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.545495 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.545574 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.545595 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.545626 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.545647 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.648480 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.648525 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.648534 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.648551 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.648562 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.713227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.713267 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.713275 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.713289 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.713298 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.729028 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:00Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.732883 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.732928 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.732938 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.732951 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.732960 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.751920 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:00Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.756530 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.756563 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.756573 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.756586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.756596 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.775879 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:00Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.780351 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.780402 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.780419 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.780441 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.780458 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.798466 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:00Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.802802 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.802868 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.802893 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.802922 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.802943 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.829063 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:00Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.829542 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.831327 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.831448 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.831537 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.831622 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.831712 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.935336 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.935389 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.935409 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.935437 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.935475 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:00Z","lastTransitionTime":"2026-01-20T16:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.959343 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 14:25:08.249023038 +0000 UTC Jan 20 16:32:00 crc kubenswrapper[4995]: I0120 16:32:00.988879 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:00 crc kubenswrapper[4995]: E0120 16:32:00.989438 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.038382 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.038452 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.038476 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.038504 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.038527 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.141397 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.141465 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.141488 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.141519 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.141541 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.244733 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.244800 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.244824 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.244854 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.244876 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.347805 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.347845 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.347854 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.347868 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.347877 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.450775 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.450854 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.450878 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.450913 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.450939 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.553602 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.553685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.553710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.553739 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.553757 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.657235 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.657271 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.657280 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.657294 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.657303 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.760035 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.760533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.760553 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.760577 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.760597 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.864196 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.864254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.864270 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.864291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.864306 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.960551 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 08:41:29.442642079 +0000 UTC Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.967041 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.967162 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.967183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.967252 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.967275 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:01Z","lastTransitionTime":"2026-01-20T16:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.989614 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.989722 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:01 crc kubenswrapper[4995]: I0120 16:32:01.989771 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:01 crc kubenswrapper[4995]: E0120 16:32:01.989899 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:01 crc kubenswrapper[4995]: E0120 16:32:01.990029 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:01 crc kubenswrapper[4995]: E0120 16:32:01.990212 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.013500 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.031290 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.044909 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.069349 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.069383 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.069395 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.069421 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.069432 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.079162 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.098665 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.128200 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba
1185812178420c3db11a64cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.149242 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.171033 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.171801 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.171855 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.171875 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.171900 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.171918 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.193162 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.212018 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.226387 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.237855 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.262872 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountP
ath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.275846 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.275906 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.275925 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.275950 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.275968 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.281983 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 
16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.300108 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.326861 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.346577 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:02Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.379223 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.379270 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.379282 4995 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.379299 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.379310 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.482437 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.482529 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.482556 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.482985 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.483006 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.586571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.586679 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.586698 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.586723 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.586742 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.689854 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.689906 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.689914 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.689928 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.689937 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.717849 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:02 crc kubenswrapper[4995]: E0120 16:32:02.718007 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:32:02 crc kubenswrapper[4995]: E0120 16:32:02.718123 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:10.718101252 +0000 UTC m=+48.962706138 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.792372 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.792417 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.792429 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.792446 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.792457 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.894613 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.894682 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.894715 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.894743 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.894763 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.960655 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 12:30:45.82758723 +0000 UTC Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.989292 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:02 crc kubenswrapper[4995]: E0120 16:32:02.989439 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.996502 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.996540 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.996549 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.996586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:02 crc kubenswrapper[4995]: I0120 16:32:02.996600 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:02Z","lastTransitionTime":"2026-01-20T16:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.099870 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.099949 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.099973 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.100000 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.100023 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.203251 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.203330 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.203355 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.203388 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.203412 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.307125 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.307225 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.307244 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.307313 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.307331 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.413132 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.413737 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.413767 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.413783 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.413792 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.516957 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.517028 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.517046 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.517124 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.517143 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.620240 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.620324 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.620339 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.620356 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.620369 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.723265 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.723356 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.723385 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.723418 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.723444 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.827144 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.827205 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.827227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.827254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.827275 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.930927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.930983 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.931000 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.931025 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.931042 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:03Z","lastTransitionTime":"2026-01-20T16:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.960888 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 17:37:36.533626314 +0000 UTC Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.989470 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.989522 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:03 crc kubenswrapper[4995]: I0120 16:32:03.989506 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:03 crc kubenswrapper[4995]: E0120 16:32:03.989656 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:03 crc kubenswrapper[4995]: E0120 16:32:03.989843 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:03 crc kubenswrapper[4995]: E0120 16:32:03.990038 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.033478 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.033593 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.033617 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.033649 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.033674 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.136212 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.136290 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.136315 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.136346 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.136367 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.238698 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.238765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.238782 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.238811 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.238827 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.341554 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.341594 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.341603 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.341616 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.341626 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.444547 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.444620 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.444639 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.444667 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.444687 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.547563 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.547634 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.547657 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.547685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.547706 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.651222 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.651286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.651311 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.651340 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.651362 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.754132 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.754202 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.754227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.754255 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.754276 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.857879 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.857946 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.857966 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.857992 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.858009 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.961143 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 07:37:11.830547581 +0000 UTC Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.961512 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.961581 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.961610 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.961642 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.961700 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:04Z","lastTransitionTime":"2026-01-20T16:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.988892 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:04 crc kubenswrapper[4995]: E0120 16:32:04.989121 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:04 crc kubenswrapper[4995]: I0120 16:32:04.990233 4995 scope.go:117] "RemoveContainer" containerID="d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.065217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.065280 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.065299 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.065323 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.065341 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.168028 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.168065 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.168093 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.168108 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.168121 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.270026 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.270121 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.270147 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.270175 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.270199 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.271373 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/1.log" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.274675 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.275053 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.291703 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e 
UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"co
ntainerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.307372 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.320489 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.332569 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.348897 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.367527 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.373677 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.373732 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.373750 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.373773 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.373790 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.384936 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.400295 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.414832 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.430361 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.445565 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.459323 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.468278 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.475828 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.475877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.475889 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.475907 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.475916 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.485327 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.495425 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.505868 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.518509 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.578479 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.578533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.578541 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.578554 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.578564 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.681152 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.681196 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.681209 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.681225 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.681238 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.783769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.783821 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.783830 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.783844 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.783853 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.886045 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.886126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.886146 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.886169 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.886186 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.962259 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 16:43:56.252059931 +0000 UTC Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988540 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988565 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:05 crc kubenswrapper[4995]: E0120 16:32:05.988663 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988545 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988774 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988835 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:05 crc kubenswrapper[4995]: E0120 16:32:05.988838 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988862 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988914 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:05 crc kubenswrapper[4995]: I0120 16:32:05.988937 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:05Z","lastTransitionTime":"2026-01-20T16:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:05 crc kubenswrapper[4995]: E0120 16:32:05.989035 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.092416 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.092450 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.092460 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.092472 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.092481 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.195765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.195839 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.195871 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.195900 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.195920 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.281463 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/2.log" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.282700 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/1.log" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.287275 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908" exitCode=1 Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.287411 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.287689 4995 scope.go:117] "RemoveContainer" containerID="d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.288462 4995 scope.go:117] "RemoveContainer" containerID="e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908" Jan 20 16:32:06 crc kubenswrapper[4995]: E0120 16:32:06.288792 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.298192 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.298245 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.298268 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.298300 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.298324 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.308165 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.328352 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.346401 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.362780 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.383497 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 
16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.401487 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.401548 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.401560 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.401579 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.401591 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.403345 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.427019 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.440240 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.456310 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e
595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.470722 4995 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.482837 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.504784 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc 
kubenswrapper[4995]: I0120 16:32:06.505068 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.505228 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.505362 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.505479 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.511419 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"st
artedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf
236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.533346 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.548209 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.562277 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\
":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.573349 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.608300 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.608338 4995 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.608346 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.608361 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.608370 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.629196 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d98139f8547515d2d985f520ec195e6df74325ba1185812178420c3db11a64cd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:31:52Z\\\",\\\"message\\\":\\\"\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF0120 16:31:51.909620 6427 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:31:51Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:31:51.911423 6427 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUI\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", 
Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:06Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.710250 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.710489 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.710624 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.710709 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.710791 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.814214 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.814261 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.814278 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.814301 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.814317 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.917611 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.917921 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.918063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.918215 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.918318 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:06Z","lastTransitionTime":"2026-01-20T16:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.962948 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 20:55:30.971749501 +0000 UTC Jan 20 16:32:06 crc kubenswrapper[4995]: I0120 16:32:06.989358 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:06 crc kubenswrapper[4995]: E0120 16:32:06.989507 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.021202 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.021551 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.021756 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.021938 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.022144 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.125294 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.125356 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.125373 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.125394 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.125410 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.227308 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.227581 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.227645 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.227710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.227815 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.292947 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/2.log" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.297154 4995 scope.go:117] "RemoveContainer" containerID="e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908" Jan 20 16:32:07 crc kubenswrapper[4995]: E0120 16:32:07.297317 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.311991 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.325171 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.330163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.330197 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.330207 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.330221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.330231 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.338152 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.349869 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.358376 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.372596 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-co
py\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.384442 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.396515 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.411994 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.426236 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.432278 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.432331 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.432348 4995 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.432390 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.432405 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.442834 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.456140 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.471785 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.484229 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.504221 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.519352 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.535227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.535274 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.535288 4995 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.535310 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.535326 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.547549 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:07Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.638510 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.638571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.638595 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.638625 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.638651 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.742243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.742331 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.742353 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.742383 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.742404 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.844311 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.844354 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.844366 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.844383 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.844395 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.947200 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.947257 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.947275 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.947297 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.947314 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:07Z","lastTransitionTime":"2026-01-20T16:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.963762 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 16:20:44.122800164 +0000 UTC
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.989686 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.989746 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:07 crc kubenswrapper[4995]: E0120 16:32:07.990252 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:07 crc kubenswrapper[4995]: E0120 16:32:07.990351 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:07 crc kubenswrapper[4995]: I0120 16:32:07.989824 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:07 crc kubenswrapper[4995]: E0120 16:32:07.990479 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
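
[editor's note: the certificate_manager.go:356 lines in this stretch report a different rotation deadline each second, always well before the 2026-02-24 expiration. That matches the jittered-deadline scheme used by the upstream client-go certificate manager, which (roughly) picks a random point in the 70-90% band of the certificate's validity window so a fleet of kubelets does not rotate at the same instant. A minimal Go sketch of that idea; the notBefore value is a hypothetical assumption, since the log only shows the expiration:]

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a random point in roughly the 70-90% band of
    // the certificate's validity window (an approximation of client-go's
    // jitter, not a quote of it).
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
        return notBefore.Add(jittered)
    }

    func main() {
        // Expiration taken from the log; the issue date is assumed.
        notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
        notBefore := notAfter.Add(-365 * 24 * time.Hour) // hypothetical
        for i := 0; i < 3; i++ {
            fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
        }
    }
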
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.050291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.050374 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.050397 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.050427 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.050452 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.153929 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.153983 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.153994 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.154014 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.154027 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.257024 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.257069 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.257163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.257195 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.257216 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.359867 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.359927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.359943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.359968 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.359985 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.463186 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.463286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.463306 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.463329 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.463346 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.566059 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.566159 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.566184 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.566213 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.566234 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.668990 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.669042 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.669116 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.669151 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.669173 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.771952 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.772040 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.772058 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.772109 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.772143 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.884052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.884170 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.884191 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.884215 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.884232 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.964822 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 23:31:12.799486793 +0000 UTC
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.987029 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.987163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.987190 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.987221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.987238 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:08Z","lastTransitionTime":"2026-01-20T16:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:08 crc kubenswrapper[4995]: I0120 16:32:08.989385 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:08 crc kubenswrapper[4995]: E0120 16:32:08.989631 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
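
[editor's note: the condition repeated throughout this stretch, "no CNI configuration file in /etc/kubernetes/cni/net.d/", is raised while the container runtime finds no network config in its CNI conf directory; the node stays NotReady until ovn-kubernetes writes one. A minimal Go sketch of such a readiness probe, assuming the conventional .conf/.conflist/.json extensions; the runtime's exact matching rules may differ:]

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // hasCNIConfig loosely mimics the check behind the repeating message:
    // scan the CNI conf dir and report whether any network config exists.
    func hasCNIConfig(dir string) (bool, error) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return false, err
        }
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
        fmt.Println(ok, err)
    }
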
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.090241 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.090341 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.090368 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.090414 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.090449 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.192851 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.192927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.192946 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.192966 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.192982 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.296064 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.296187 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.296242 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.296269 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.296285 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.399611 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.399678 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.399697 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.399725 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.399744 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.502469 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.502533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.502541 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.502555 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.502565 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.605142 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.605185 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.605196 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.605214 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.605229 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.708526 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.708573 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.708611 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.708638 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.708654 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.812463 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.812519 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.812536 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.812561 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.812578 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.915447 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.915510 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.915523 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.915539 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.915551 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:09Z","lastTransitionTime":"2026-01-20T16:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.965467 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 13:43:30.624817678 +0000 UTC
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.988916 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:09 crc kubenswrapper[4995]: E0120 16:32:09.989042 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.988916 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:09 crc kubenswrapper[4995]: I0120 16:32:09.989108 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:09 crc kubenswrapper[4995]: E0120 16:32:09.989180 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:09 crc kubenswrapper[4995]: E0120 16:32:09.989356 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.018754 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.018808 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.018826 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.018849 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.018868 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.122574 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.122662 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.122688 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.122724 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.122765 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.226866 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.226953 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.227004 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.227028 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.227044 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.329729 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.329788 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.329798 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.329813 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.329823 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.433283 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.433340 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.433348 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.433363 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.433373 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.536608 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.536643 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.536651 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.536663 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.536672 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
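
[editor's note: each setters.go:603 line above embeds the Ready condition the kubelet is about to write on the Node object. A minimal Go sketch that reproduces that condition JSON, using a local stand-in struct for k8s.io/api/core/v1.NodeCondition reduced to the fields visible in the log:]

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // nodeCondition is a local stand-in for the v1.NodeCondition fields
    // that appear in the setters.go:603 records.
    type nodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        now := time.Now().UTC().Format(time.RFC3339)
        c := nodeCondition{
            Type:               "Ready",
            Status:             "False",
            LastHeartbeatTime:  now,
            LastTransitionTime: now,
            Reason:             "KubeletNotReady",
            Message:            "container runtime network not ready: NetworkReady=false ...",
        }
        b, _ := json.Marshal(c)
        fmt.Println(string(b))
    }
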
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.639121 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.639167 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.639183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.639199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.639210 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.653292 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.662817 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.667163 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.680997 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.694999 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
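
[editor's note: every "Failed to update status for pod" record in this stretch bottoms out in the same TLS failure: the network-node-identity webhook's serving certificate expired on 2025-08-24, months before the node's clock reads 2026-01-20. The wording "certificate has expired or is not yet valid" is Go's crypto/x509 validity-window check; a minimal sketch of inspecting a PEM certificate the same way, with a hypothetical file path:]

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; the webhook serving cert lives wherever
        // network-node-identity mounts it on this host.
        data, err := os.ReadFile("/path/to/webhook-serving.crt")
        if err != nil {
            panic(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            panic("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            panic(err)
        }
        now := time.Now()
        // The same window comparison that produces the log's
        // "certificate has expired or is not yet valid" message.
        switch {
        case now.After(cert.NotAfter):
            fmt.Printf("expired: current time %s is after %s\n", now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Printf("not yet valid: current time %s is before %s\n", now.Format(time.RFC3339), cert.NotBefore.Format(time.RFC3339))
        default:
            fmt.Println("certificate valid until", cert.NotAfter.Format(time.RFC3339))
        }
    }
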
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.705418 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.716592 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.727282 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.741114 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.742684 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.742748 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.742765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.742791 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.742809 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.760363 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.776234 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.789814 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.804378 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:10 crc kubenswrapper[4995]: E0120 16:32:10.804501 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:32:10 crc kubenswrapper[4995]: E0120 16:32:10.804582 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:26.804560535 +0000 UTC m=+65.049165381 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.807976 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.817059 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.842685 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.845103 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.845154 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.845171 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.845194 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.845212 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.854903 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.868988 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.883103 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc
-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.904825 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:10Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.948167 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.948233 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.948255 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.948286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.948308 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:10Z","lastTransitionTime":"2026-01-20T16:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.965896 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 08:27:55.590316491 +0000 UTC
Jan 20 16:32:10 crc kubenswrapper[4995]: I0120 16:32:10.989187 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:10 crc kubenswrapper[4995]: E0120 16:32:10.989325 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.050740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.050773 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.050781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.050794 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.050803 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.060617 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.060681 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.060717 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.060754 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.060775 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.074748 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:11Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.078565 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.078594 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.078603 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.078615 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.078624 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.089980 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:11Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.094877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.094913 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.094927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.094942 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.094952 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.105967 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:11Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.110506 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.110591 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.110626 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.110656 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.110673 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.132115 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:11Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.137407 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.137457 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.137468 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.137487 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.137498 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.149095 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:11Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.149252 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.153373 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.153410 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.153423 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.153439 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.153451 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.256442 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.256501 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.256519 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.256542 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.256561 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.359190 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.359265 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.359287 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.359316 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.359340 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.461432 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.461466 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.461475 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.461489 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.461500 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.564749 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.564788 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.564797 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.564810 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.564819 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.667645 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.667706 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.667722 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.667745 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.667761 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.770394 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.770472 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.770488 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.770511 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.770528 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.814561 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.814731 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:32:43.8146944 +0000 UTC m=+82.059299246 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.874613 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.874704 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.874727 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.874752 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.874769 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.915496 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.915559 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.915602 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.915655 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915726 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915769 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915771 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915804 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915843 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915866 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915852 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:43.915828659 +0000 UTC m=+82.160433505 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915917 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915939 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915944 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:43.915923542 +0000 UTC m=+82.160528378 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.915973 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:43.915961383 +0000 UTC m=+82.160566219 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.916037 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:43.916003804 +0000 UTC m=+82.160608650 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.966958 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 06:38:15.96662517 +0000 UTC Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.977554 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.977619 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.977642 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.977675 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.977699 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:11Z","lastTransitionTime":"2026-01-20T16:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.988967 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.989046 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.989182 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:11 crc kubenswrapper[4995]: I0120 16:32:11.989209 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.989353 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:11 crc kubenswrapper[4995]: E0120 16:32:11.989437 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.010539 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.031100 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.047009 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.060674 4995 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.070803 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.080016 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.080051 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.080061 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.080092 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.080107 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.088435 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b8806
9cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.099445 4995 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.113555 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.132913 4995 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.145456 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.156647 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.174141 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.182414 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.182456 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.182470 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.182489 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.182504 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.184105 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.193424 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.209889 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.221452 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.238999 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.254733 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:12Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.285222 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.285263 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.285294 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.285309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.285319 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.387633 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.387694 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.387711 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.387735 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.387751 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.491600 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.491665 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.491682 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.491706 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.491724 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.594681 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.594748 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.594764 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.594787 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.594804 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.699424 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.699499 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.699519 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.699548 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.699577 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.802632 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.802684 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.802701 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.802724 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.802741 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.905957 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.906137 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.906161 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.906186 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.906206 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:12Z","lastTransitionTime":"2026-01-20T16:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.968146 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 00:11:06.955365232 +0000 UTC Jan 20 16:32:12 crc kubenswrapper[4995]: I0120 16:32:12.989046 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:12 crc kubenswrapper[4995]: E0120 16:32:12.989270 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.009609 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.009669 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.009687 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.009712 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.009730 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.112801 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.112864 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.112880 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.112907 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.112924 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.218050 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.218179 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.218207 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.218237 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.218267 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.321463 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.321543 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.321565 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.321590 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.321608 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.424247 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.424297 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.424309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.424327 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.424339 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.527001 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.527070 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.527173 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.527208 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.527231 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.630125 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.630190 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.630208 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.630237 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.630256 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.733644 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.733703 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.733720 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.733746 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.733766 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.837165 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.837226 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.837243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.837266 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.837285 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.940456 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.940523 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.940548 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.940572 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.940589 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:13Z","lastTransitionTime":"2026-01-20T16:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.968879 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 02:10:23.024596305 +0000 UTC Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.989496 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.989590 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:13 crc kubenswrapper[4995]: I0120 16:32:13.989591 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:13 crc kubenswrapper[4995]: E0120 16:32:13.989690 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:13 crc kubenswrapper[4995]: E0120 16:32:13.989855 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:13 crc kubenswrapper[4995]: E0120 16:32:13.990052 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.044643 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.044708 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.044735 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.044768 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.044792 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.147536 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.147598 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.147611 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.147629 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.147643 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.255198 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.255274 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.255292 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.255316 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.255334 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.359342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.359397 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.359414 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.359440 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.359457 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.462461 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.462514 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.462531 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.462552 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.462570 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.566558 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.566618 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.566635 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.566659 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.566677 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.670119 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.670160 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.670171 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.670187 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.670198 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.772955 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.772993 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.773006 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.773028 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.773051 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.876457 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.876520 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.876535 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.876560 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.876574 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.969980 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 07:46:17.748051589 +0000 UTC Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.979980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.980051 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.980073 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.980099 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.980197 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:14Z","lastTransitionTime":"2026-01-20T16:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:14 crc kubenswrapper[4995]: I0120 16:32:14.989334 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:14 crc kubenswrapper[4995]: E0120 16:32:14.989566 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.083187 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.083265 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.083284 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.083309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.083326 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.186524 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.186586 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.186603 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.186629 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.186648 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.290399 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.290460 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.290486 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.290519 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.290539 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.393749 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.393809 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.393827 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.393854 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.393870 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.497609 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.497664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.497679 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.497721 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.497738 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.600861 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.600901 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.600910 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.600932 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.600942 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.703814 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.703883 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.703908 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.703938 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.703961 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.806680 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.806740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.806757 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.806784 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.806828 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.910562 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.910622 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.910640 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.910666 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.910686 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:15Z","lastTransitionTime":"2026-01-20T16:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.970898 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 10:25:37.649297808 +0000 UTC Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.989393 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.989439 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:15 crc kubenswrapper[4995]: I0120 16:32:15.989490 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:15 crc kubenswrapper[4995]: E0120 16:32:15.989672 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:15 crc kubenswrapper[4995]: E0120 16:32:15.989817 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:15 crc kubenswrapper[4995]: E0120 16:32:15.989994 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.013516 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.013601 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.013619 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.013676 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.013694 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:16Z","lastTransitionTime":"2026-01-20T16:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.971823 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 18:32:06.504846778 +0000 UTC Jan 20 16:32:16 crc kubenswrapper[4995]: I0120 16:32:16.989671 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:16 crc kubenswrapper[4995]: E0120 16:32:16.989831 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.047183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.047244 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.047265 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.047291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.047309 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:17Z","lastTransitionTime":"2026-01-20T16:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.150925 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.151016 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.151049 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.151122 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.151146 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:17Z","lastTransitionTime":"2026-01-20T16:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.874004 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.874037 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.874045 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.874058 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.874068 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:17Z","lastTransitionTime":"2026-01-20T16:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.972346 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 18:17:44.613956989 +0000 UTC Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.976433 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.976478 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.976500 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.976527 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.976550 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:17Z","lastTransitionTime":"2026-01-20T16:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.988620 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:17 crc kubenswrapper[4995]: E0120 16:32:17.988781 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.989058 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:17 crc kubenswrapper[4995]: I0120 16:32:17.989199 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:17 crc kubenswrapper[4995]: E0120 16:32:17.989366 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:17 crc kubenswrapper[4995]: E0120 16:32:17.989478 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.079156 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.079216 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.079241 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.079268 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.079287 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:18Z","lastTransitionTime":"2026-01-20T16:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.800697 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.800736 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.800746 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.800762 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.800774 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:18Z","lastTransitionTime":"2026-01-20T16:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.903431 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.903507 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.903532 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.903563 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:18 crc kubenswrapper[4995]: I0120 16:32:18.903589 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:18Z","lastTransitionTime":"2026-01-20T16:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.020084 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 05:44:54.630399911 +0000 UTC Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.020217 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:19 crc kubenswrapper[4995]: E0120 16:32:19.020491 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.021680 4995 scope.go:117] "RemoveContainer" containerID="e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908" Jan 20 16:32:19 crc kubenswrapper[4995]: E0120 16:32:19.021937 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.023139 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.023199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.023221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.023249 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.023271 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:19Z","lastTransitionTime":"2026-01-20T16:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.126071 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.126155 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.126172 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.126195 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.126217 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:19Z","lastTransitionTime":"2026-01-20T16:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.850143 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.850314 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.850346 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.850375 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.850396 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:19Z","lastTransitionTime":"2026-01-20T16:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.953714 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.953769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.953785 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.953809 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.953827 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:19Z","lastTransitionTime":"2026-01-20T16:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.988616 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:19 crc kubenswrapper[4995]: E0120 16:32:19.988801 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.989173 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:19 crc kubenswrapper[4995]: E0120 16:32:19.989323 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:19 crc kubenswrapper[4995]: I0120 16:32:19.989625 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:19 crc kubenswrapper[4995]: E0120 16:32:19.989902 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.020808 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 14:22:00.659666698 +0000 UTC
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.056764 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.056860 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.056880 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.056903 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.056920 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.160485 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.160539 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.160557 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.160580 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.160596 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.263192 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.263247 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.263264 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.263286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.263303 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.366176 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.366230 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.366248 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.366272 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.366487 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.470186 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.470254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.470276 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.470307 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.470325 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.573261 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.573324 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.573338 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.573364 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.573379 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.676719 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.676784 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.676804 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.676829 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.676847 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.779922 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.779980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.779992 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.780016 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.780028 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.883370 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.883411 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.883424 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.883443 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.883456 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.986145 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.986185 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.986199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.986215 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.986228 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:20Z","lastTransitionTime":"2026-01-20T16:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:20 crc kubenswrapper[4995]: I0120 16:32:20.988719 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:20 crc kubenswrapper[4995]: E0120 16:32:20.988826 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.021752 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 02:23:30.797975262 +0000 UTC
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.088520 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.088583 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.088607 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.088631 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.088648 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.191214 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.191247 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.191259 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.191272 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.191281 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.289576 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.289645 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.289669 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.289701 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.289724 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.310843 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:21Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.315859 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.315909 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.315925 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.315944 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.315962 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.329064 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:21Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.333709 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.333756 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.333772 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.333795 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.333810 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.352451 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:21Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.356038 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.356093 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.356107 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.356124 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.356135 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.367534 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:21Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.372312 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.372350 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.372362 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.372379 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.372391 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.382524 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:21Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.382665 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.384694 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.384778 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.384799 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.384824 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.384844 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.488058 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.488114 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.488127 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.488142 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.488154 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.590585 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.590639 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.590656 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.590679 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.590697 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.693491 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.693537 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.693550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.693568 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.693582 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.796781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.796835 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.796846 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.796862 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.796874 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.899807 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.899852 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.899866 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.899880 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.899889 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:21Z","lastTransitionTime":"2026-01-20T16:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.988732 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.988840 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.988919 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.989238 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:21 crc kubenswrapper[4995]: I0120 16:32:21.989349 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:21 crc kubenswrapper[4995]: E0120 16:32:21.989509 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.006408 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.006484 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.006509 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.007357 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.007476 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.018403 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.022118 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 
05:47:23.336241686 +0000 UTC Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.040532 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.059723 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.089750 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.108239 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 
16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.110360 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.110412 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.110432 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.110463 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.110486 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.124209 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.144554 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.158027 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.175324 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.192288 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.208773 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.213444 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.213551 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.213578 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.214217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.214499 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.225227 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.258040 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o
://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd6575
42731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.288815 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers 
with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3b
d8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.308749 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z"
Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.316984 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.317018 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.317027 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.317041 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.317053 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.321561 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.333161 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.344205 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:22Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.419885 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.419943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.419966 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.419994 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.420018 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.523181 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.523226 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.523240 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.523259 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.523275 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.625912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.626293 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.626314 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.626342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.626360 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.728598 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.728670 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.728710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.728739 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.728759 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.832533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.832607 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.832625 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.832653 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.832670 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.935135 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.935176 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.935188 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.935203 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.935215 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:22Z","lastTransitionTime":"2026-01-20T16:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:22 crc kubenswrapper[4995]: I0120 16:32:22.989519 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:22 crc kubenswrapper[4995]: E0120 16:32:22.989694 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.022983 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 18:28:15.439116142 +0000 UTC Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.043451 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.043523 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.043552 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.043577 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.043606 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.147664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.147720 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.147735 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.147757 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.147771 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.251628 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.251757 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.251796 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.251825 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.251850 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.354795 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.354835 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.354846 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.354867 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.354881 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.457142 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.457174 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.457182 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.457194 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.457205 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.559681 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.559753 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.559775 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.559804 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.559830 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.662690 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.662752 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.662769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.662793 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.662810 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.765494 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.765549 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.765565 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.765588 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.765631 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.869543 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.869606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.869625 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.869649 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.869669 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.972573 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.972606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.972616 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.972634 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.972643 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:23Z","lastTransitionTime":"2026-01-20T16:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.989542 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:23 crc kubenswrapper[4995]: E0120 16:32:23.989660 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.989702 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:23 crc kubenswrapper[4995]: I0120 16:32:23.989748 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:23 crc kubenswrapper[4995]: E0120 16:32:23.989913 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:23 crc kubenswrapper[4995]: E0120 16:32:23.989980 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.024025 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 03:37:27.556301785 +0000 UTC Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.075322 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.075352 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.075363 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.075376 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.075385 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.178443 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.178490 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.178507 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.178531 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.178550 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.281939 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.282000 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.282022 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.282118 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.282144 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.384912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.384971 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.384995 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.385025 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.385049 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.488652 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.488734 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.488761 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.488791 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.488817 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.591344 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.591386 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.591398 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.591413 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.591424 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.693476 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.693542 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.693559 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.693587 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.693604 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.796150 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.796222 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.796244 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.796272 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.796293 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.899567 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.899638 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.899661 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.899698 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.899721 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:24Z","lastTransitionTime":"2026-01-20T16:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:24 crc kubenswrapper[4995]: I0120 16:32:24.988656 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:24 crc kubenswrapper[4995]: E0120 16:32:24.988864 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.002260 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.002306 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.002319 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.002337 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.002348 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.024642 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 00:38:25.454287731 +0000 UTC Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.105685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.105741 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.105757 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.105777 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.105791 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.208404 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.208451 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.208463 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.208480 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.208492 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.311712 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.311760 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.311771 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.311789 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.311800 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.414658 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.414717 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.414742 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.414777 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.414801 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.517509 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.517550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.517561 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.517577 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.517588 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.619973 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.620042 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.620050 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.620122 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.620137 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.725340 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.725375 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.725385 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.725399 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.725409 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.830864 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.830938 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.830960 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.830988 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.831008 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.932982 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.933039 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.933058 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.933119 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.933140 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:25Z","lastTransitionTime":"2026-01-20T16:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.988621 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.988667 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:25 crc kubenswrapper[4995]: E0120 16:32:25.988792 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:25 crc kubenswrapper[4995]: I0120 16:32:25.988816 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:25 crc kubenswrapper[4995]: E0120 16:32:25.989009 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:25 crc kubenswrapper[4995]: E0120 16:32:25.989119 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.025183 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 06:16:31.157218922 +0000 UTC Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.035045 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.035065 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.035075 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.035104 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.035114 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.035114 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.138044 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.138128 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.138146 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.138166 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.138178 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.240686 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.240970 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.241045 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.241143 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.241215 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.344372 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.344417 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.344425 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.344441 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.344472 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.446875 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.447182 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.447302 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.447419 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.447516 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.550180 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.550506 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.550622 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.550714 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.550789 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.653998 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.654063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.654122 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.654153 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.654174 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.757243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.757289 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.757304 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.757324 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.757339 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.806439 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:26 crc kubenswrapper[4995]: E0120 16:32:26.806626 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 16:32:26 crc kubenswrapper[4995]: E0120 16:32:26.806696 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:32:58.80667744 +0000 UTC m=+97.051282256 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.860368 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.860429 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.860440 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.860463 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.860477 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.963480 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.963528 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.963545 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.963567 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.963583 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:26Z","lastTransitionTime":"2026-01-20T16:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:26 crc kubenswrapper[4995]: I0120 16:32:26.989112 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
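Note: the nestedpendingoperations.go:348 entry above shows how the kubelet backs off failed volume operations: "No retries permitted until 2026-01-20 16:32:58 ... (durationBeforeRetry 32s)". The wait doubles after each consecutive failure up to a cap; 32s is what a doubling sequence starting at 500ms reaches after seven failures, consistent with upstream kubelet's exponential-backoff constants (assumed here: 500ms initial, 2m2s cap, as in pkg/util/goroutinemap/exponentialbackoff). A sketch of that policy, not the kubelet's actual code:

    package main

    import (
        "fmt"
        "time"
    )

    const (
        initialDelay = 500 * time.Millisecond          // assumed initial backoff
        maxDelay     = 2*time.Minute + 2*time.Second   // assumed cap
    )

    // durationBeforeRetry returns the wait applied after the n-th consecutive
    // failure under a simple doubling policy.
    func durationBeforeRetry(failures int) time.Duration {
        d := initialDelay
        for i := 1; i < failures; i++ {
            d *= 2
            if d >= maxDelay {
                return maxDelay
            }
        }
        return d
    }

    func main() {
        for n := 1; n <= 8; n++ {
            fmt.Printf("failure %d -> wait %v\n", n, durationBeforeRetry(n))
        }
        // failure 7 -> wait 32s, matching durationBeforeRetry in the entry above
    }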
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.026026 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 08:24:14.619971376 +0000 UTC Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.066504 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.066550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.066571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.066598 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.066619 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.168517 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.168563 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.168578 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.168598 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.168614 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.168614 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.271325 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.271364 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.271375 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.271389 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.271401 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.373104 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.373134 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.373144 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.373157 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.373165 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.476183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.476239 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.476257 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.476280 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.476296 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.578467 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.578511 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.578524 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.578542 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.578555 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.680721 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.680751 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.680761 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.680772 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.680782 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.783796 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.783851 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.783864 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.783884 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.783897 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.886735 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.886791 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.886808 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.886831 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.886848 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988537 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:27 crc kubenswrapper[4995]: E0120 16:32:27.988634 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988633 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988678 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988743 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988777 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988790 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988807 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:27 crc kubenswrapper[4995]: I0120 16:32:27.988815 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:27Z","lastTransitionTime":"2026-01-20T16:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:27 crc kubenswrapper[4995]: E0120 16:32:27.988833 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:27 crc kubenswrapper[4995]: E0120 16:32:27.989179 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.027046 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 08:00:34.966906143 +0000 UTC
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.091333 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.091398 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.091420 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.091448 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.091469 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.194001 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.194036 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.194044 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.194058 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.194068 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.300892 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.300926 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.300935 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.300949 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.300958 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.375234 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/0.log" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.375283 4995 generic.go:334] "Generic (PLEG): container finished" podID="5008a882-4540-4ebe-8a27-53f0de0cbd4a" containerID="f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1" exitCode=1 Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.375314 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerDied","Data":"f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.375715 4995 scope.go:117] "RemoveContainer" containerID="f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.389242 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file 
check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.402832 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.402908 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.402932 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.402940 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.402956 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.402964 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.430702 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.446907 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.460348 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.471985 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.490342 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.500203 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.507314 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.507430 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.507506 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.507583 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.507644 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.512365 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.522396 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.531984 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.543263 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.554932 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.567588 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.582513 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.595695 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.606713 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.609878 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.609909 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.609918 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.609931 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.609943 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.618158 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:28Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.712185 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.712218 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.712228 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.712240 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.712248 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.814650 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.814710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.814727 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.814749 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.814766 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.917592 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.917688 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.917707 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.917762 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.917783 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:28Z","lastTransitionTime":"2026-01-20T16:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:28 crc kubenswrapper[4995]: I0120 16:32:28.989203 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:28 crc kubenswrapper[4995]: E0120 16:32:28.989418 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.020227 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.020271 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.020283 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.020298 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.020309 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.027167 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 11:34:48.628849153 +0000 UTC Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.123266 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.123327 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.123342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.123357 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.123388 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.226208 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.226270 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.226279 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.226294 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.226304 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.328700 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.328740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.328751 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.328769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.328783 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.379021 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/0.log" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.379108 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerStarted","Data":"1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.393228 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"
imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.407138 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.424442 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.430921 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.430949 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.430961 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.430980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.430993 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.438825 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.451357 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.478898 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.507744 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00
bc932324953f64b404e65908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.521563 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.533827 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.533870 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.533882 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.533899 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.533910 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.538917 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.551668 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.562360 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.579331 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{
\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.591836 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.599731 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.615592 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e
595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.624792 4995 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.635777 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.636274 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc 
kubenswrapper[4995]: I0120 16:32:29.636313 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.636323 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.636340 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.636351 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.653234 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\
":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:29Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.738096 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.738147 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.738161 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.738181 4995 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.738194 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.840350 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.840402 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.840413 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.840435 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.840447 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.943214 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.943259 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.943269 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.943288 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.943300 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:29Z","lastTransitionTime":"2026-01-20T16:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.989048 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.989138 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:29 crc kubenswrapper[4995]: I0120 16:32:29.989221 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:29 crc kubenswrapper[4995]: E0120 16:32:29.989395 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:29 crc kubenswrapper[4995]: E0120 16:32:29.989533 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:29 crc kubenswrapper[4995]: E0120 16:32:29.989645 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.028302 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 07:47:16.546269212 +0000 UTC Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.046060 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.046128 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.046141 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.046158 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.046171 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.148168 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.148196 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.148205 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.148217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.148227 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.250144 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.250183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.250195 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.250211 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.250222 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.352557 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.352598 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.352608 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.352622 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.352632 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.455061 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.455116 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.455127 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.455139 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.455149 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.557606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.557640 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.557647 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.557661 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.557670 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.660564 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.660636 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.660660 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.660691 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.660713 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.762644 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.762690 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.762703 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.762725 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.762737 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.865148 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.865189 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.865205 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.865222 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.865235 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.967481 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.967520 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.967530 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.967544 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.967553 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:30Z","lastTransitionTime":"2026-01-20T16:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.989047 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:30 crc kubenswrapper[4995]: E0120 16:32:30.989421 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:30 crc kubenswrapper[4995]: I0120 16:32:30.989589 4995 scope.go:117] "RemoveContainer" containerID="e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.029120 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 20:59:20.971332827 +0000 UTC Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.069289 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.069327 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.069336 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.069350 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.069360 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.175342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.175727 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.176124 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.176229 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.176770 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.280271 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.280305 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.280313 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.280327 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.280336 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.382612 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.382649 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.382658 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.382675 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.382684 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.385124 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/2.log" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.387360 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.387783 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.412852 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.425620 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.437140 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.450068 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.460753 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.473827 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.484877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.484914 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc 
kubenswrapper[4995]: I0120 16:32:31.484927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.484942 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.484953 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.486465 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:3
1:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.507858 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o:
//6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.519911 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.535537 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.554253 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.566672 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.584668 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.587044 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.587098 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.587109 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.587124 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.587141 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.612180 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, 
Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.621373 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.621409 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.621423 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.621439 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.621462 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.626517 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.635374 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9
bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.639233 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.639220 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.639278 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.639298 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.639315 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.639326 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.648980 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.650277 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.652880 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.652913 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.652922 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.652935 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.652945 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.661641 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.668298 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.671610 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.671654 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.671667 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.671705 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.671717 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.683960 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.687878 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.687919 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.687930 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.687946 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.687958 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.701060 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.701195 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.702505 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.702534 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.702543 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.702559 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.702569 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.804356 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.804401 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.804412 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.804430 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.804442 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.906864 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.906897 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.906908 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.906923 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.906934 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:31Z","lastTransitionTime":"2026-01-20T16:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.988643 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.989155 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.989289 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.989425 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:31 crc kubenswrapper[4995]: I0120 16:32:31.989708 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:31 crc kubenswrapper[4995]: E0120 16:32:31.989872 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.008648 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.008822 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.009179 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.009336 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.009492 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.012506 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, 
Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.024548 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.029853 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 13:43:34.160631176 +0000 UTC Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.038530 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.050103 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.063528 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.079124 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 
16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.095635 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.112468 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.112518 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.112530 4995 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.112549 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.112563 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.112842 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.122120 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.139741 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e
595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-
copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.151143 4995 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.162902 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.192632 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.208092 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.214709 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.214757 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.214770 4995 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.214787 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.214799 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.224605 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.239448 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.251870 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.261007 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.317071 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.317136 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.317149 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.317166 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.317179 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.391895 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/3.log" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.392723 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/2.log" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.395166 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" exitCode=1 Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.395204 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.395240 4995 scope.go:117] "RemoveContainer" containerID="e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.396741 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:32:32 crc kubenswrapper[4995]: E0120 16:32:32.396952 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.410017 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 
16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.419210 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.419257 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.419272 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.419288 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.419298 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.423962 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.438069 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.451025 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.462786 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.472296 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.486017 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.494780 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.527764 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.527803 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.527814 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.527831 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.527842 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.534415 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.563172 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.578469 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.588961 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.599575 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.617894 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4c4a04e7a58b0cda3bd8221f9078ef7e70a7d00bc932324953f64b404e65908\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:06Z\\\",\\\"message\\\":\\\"ed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:05Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:05.784819 6649 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Router\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:05Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:32Z\\\",\\\"message\\\":\\\"ed: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:31.798412 7035 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2 in node crc\\\\nI0120 16:32:31.798376 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-vj5zz\\\\nI0120 16:32:31.798489 7035 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-pgz94 in node crc\\\\nI0120 16:32:31.798432 7035 obj_retry.go:365] Adding new object: *v1.Pod 
openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798516 7035 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0120 16:32:31.798526 7035 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI0120 16:32:31.798533 7035 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798304 7035 services_controller.go:451] Built service openshift-conf\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log
/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.627304 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.629941 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.629975 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.629986 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.630003 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.630015 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.639067 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.650387 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.660158 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:32Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.732228 4995 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.732279 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.732296 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.732319 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.732335 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.835473 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.835513 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.835523 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.835539 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.835549 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.937824 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.937857 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.937867 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.937882 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.937891 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:32Z","lastTransitionTime":"2026-01-20T16:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:32 crc kubenswrapper[4995]: I0120 16:32:32.989238 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:32 crc kubenswrapper[4995]: E0120 16:32:32.989487 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.031163 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 03:26:02.124187543 +0000 UTC Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.040246 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.040289 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.040298 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.040312 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.040321 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.143028 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.143063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.143072 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.143101 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.143113 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.246219 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.246264 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.246271 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.246287 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.246296 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.348805 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.348840 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.348852 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.348871 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.348882 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.400333 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/3.log" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.403960 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:32:33 crc kubenswrapper[4995]: E0120 16:32:33.404180 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.419529 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.441745 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.451781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.451850 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.451874 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.451904 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.451924 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.453444 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.471603 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.484738 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.498423 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.514565 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035
d484db93d84195112f257154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:32Z\\\",\\\"message\\\":\\\"ed: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:31.798412 7035 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2 in node crc\\\\nI0120 16:32:31.798376 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-vj5zz\\\\nI0120 16:32:31.798489 7035 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-pgz94 in node crc\\\\nI0120 16:32:31.798432 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798516 7035 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0120 16:32:31.798526 7035 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI0120 16:32:31.798533 7035 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798304 7035 services_controller.go:451] Built service openshift-conf\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.529240 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.541462 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.554146 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.554201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.554212 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.554225 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.554235 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.556576 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.567931 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.577288 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.589900 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-co
py\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\
"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.599299 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\
\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.607873 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.619791 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.632372 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.644856 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:33Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.656384 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.656415 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.656423 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.656436 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.656445 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.758440 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.758482 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.758495 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.758511 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.758522 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.860894 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.860942 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.860954 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.860972 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.860984 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.963530 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.963578 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.963593 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.963611 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.963623 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:33Z","lastTransitionTime":"2026-01-20T16:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.991281 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.991364 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:33 crc kubenswrapper[4995]: E0120 16:32:33.991409 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:33 crc kubenswrapper[4995]: E0120 16:32:33.991534 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:33 crc kubenswrapper[4995]: I0120 16:32:33.991597 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:33 crc kubenswrapper[4995]: E0120 16:32:33.991676 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.031344 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 23:28:47.425462538 +0000 UTC Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.067218 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.067273 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.067289 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.067312 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.067328 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.169504 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.169842 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.169980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.170188 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.170352 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.274218 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.274270 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.274305 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.274338 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.274361 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.376866 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.376927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.376945 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.376968 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.376985 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.479422 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.479470 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.479484 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.479501 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.479513 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.581399 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.581658 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.581781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.581943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.582064 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.684019 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.684317 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.684436 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.684526 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.684602 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.788026 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.788250 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.788372 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.788468 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.788554 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.892126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.892194 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.892217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.892243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.892264 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.988990 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:34 crc kubenswrapper[4995]: E0120 16:32:34.989179 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.994867 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.994930 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.994954 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.994984 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:34 crc kubenswrapper[4995]: I0120 16:32:34.995005 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:34Z","lastTransitionTime":"2026-01-20T16:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.031564 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 14:39:50.323316511 +0000 UTC Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.100726 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.100764 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.100774 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.100790 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.100801 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.203372 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.203424 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.203441 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.203465 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.203485 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.306781 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.306814 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.306823 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.306835 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.306843 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.409516 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.409571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.409596 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.409625 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.409647 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.512619 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.512676 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.512697 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.512723 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.512743 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.615976 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.616030 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.616047 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.616071 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.616146 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.719504 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.719558 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.719573 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.719596 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.719612 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.821990 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.822046 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.822061 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.822130 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.822164 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.925378 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.925438 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.925455 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.925478 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.925495 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:35Z","lastTransitionTime":"2026-01-20T16:32:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.989675 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.989735 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:35 crc kubenswrapper[4995]: E0120 16:32:35.989870 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:35 crc kubenswrapper[4995]: I0120 16:32:35.989943 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:35 crc kubenswrapper[4995]: E0120 16:32:35.990133 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:35 crc kubenswrapper[4995]: E0120 16:32:35.990300 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.028047 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.028167 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.028187 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.028209 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.028226 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.032413 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 19:45:27.860870889 +0000 UTC Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.131826 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.131915 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.131964 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.131991 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.132009 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.234796 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.234897 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.234924 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.234951 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.234968 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.338066 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.338153 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.338164 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.338180 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.338192 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.440584 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.440651 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.440673 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.440702 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.440723 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.543394 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.543425 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.543434 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.543447 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.543457 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.646906 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.646963 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.646984 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.647010 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.647027 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.750583 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.750662 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.750685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.750712 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.750732 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.853477 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.853533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.853550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.853572 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.853588 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.956873 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.956953 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.956978 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.957008 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.957026 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:36Z","lastTransitionTime":"2026-01-20T16:32:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:36 crc kubenswrapper[4995]: I0120 16:32:36.988716 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:36 crc kubenswrapper[4995]: E0120 16:32:36.988957 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.000989 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.033333 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 00:23:23.606492632 +0000 UTC Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.059566 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.059619 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.059634 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.059655 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.059669 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.162201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.162235 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.162245 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.162260 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.162270 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.265046 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.265320 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.265350 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.265378 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.265399 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.368328 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.368371 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.368380 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.368392 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.368401 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.471129 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.471173 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.471190 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.471214 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.471230 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.573271 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.573500 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.573570 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.573640 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.573699 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.675848 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.675894 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.675909 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.675927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.675940 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.778296 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.778671 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.778862 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.779026 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.779221 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.881662 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.881710 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.881724 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.881742 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.881755 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.984067 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.984478 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.984676 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.984877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.985154 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:37Z","lastTransitionTime":"2026-01-20T16:32:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.989481 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:37 crc kubenswrapper[4995]: E0120 16:32:37.989585 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.989484 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:37 crc kubenswrapper[4995]: I0120 16:32:37.989674 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:37 crc kubenswrapper[4995]: E0120 16:32:37.989796 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:37 crc kubenswrapper[4995]: E0120 16:32:37.989855 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.034322 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 01:15:14.983142575 +0000 UTC Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.088211 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.088253 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.088266 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.088283 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.088295 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.191174 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.191236 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.191254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.191279 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.191299 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.294226 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.294275 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.294286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.294303 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.294315 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.397302 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.397385 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.397411 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.397444 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.397467 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.499613 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.499893 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.500024 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.500199 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.500321 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.602952 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.603025 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.603049 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.603118 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.603147 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.707173 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.707249 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.707276 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.707305 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.707324 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.809807 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.809877 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.809900 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.809931 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.809961 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.913954 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.914012 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.914031 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.914054 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.914114 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:38Z","lastTransitionTime":"2026-01-20T16:32:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:38 crc kubenswrapper[4995]: I0120 16:32:38.988557 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:38 crc kubenswrapper[4995]: E0120 16:32:38.988773 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.016576 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.016671 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.016688 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.016753 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.016780 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.035175 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 09:37:45.801783504 +0000 UTC Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.118862 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.118905 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.118916 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.118934 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.118945 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.221532 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.221597 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.221615 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.221640 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.221658 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.324584 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.324631 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.324645 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.324668 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.324683 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.427009 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.427072 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.427142 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.427172 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.427194 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.530012 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.530114 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.530145 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.530178 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.530202 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.633270 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.633329 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.633340 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.633360 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.633371 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.738068 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.738165 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.738186 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.738220 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.738248 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.841027 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.841098 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.841114 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.841134 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.841147 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.942940 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.943181 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.943268 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.943365 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.943469 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:39Z","lastTransitionTime":"2026-01-20T16:32:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.989241 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.989320 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:39 crc kubenswrapper[4995]: E0120 16:32:39.989365 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:39 crc kubenswrapper[4995]: I0120 16:32:39.989247 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:39 crc kubenswrapper[4995]: E0120 16:32:39.989433 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:39 crc kubenswrapper[4995]: E0120 16:32:39.989573 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.035485 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 16:39:46.353916426 +0000 UTC Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.046892 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.046960 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.046987 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.047018 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.047042 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.149655 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.149697 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.149711 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.149730 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.149745 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.252280 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.252320 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.252334 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.252353 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.252366 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.354488 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.354524 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.354535 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.354550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.354564 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.456459 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.456529 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.456549 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.456572 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.456590 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.559387 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.559420 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.559430 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.559446 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.559456 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.661783 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.661848 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.661871 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.661899 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.661921 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.764874 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.764915 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.764927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.764943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.764955 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.866947 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.866980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.866989 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.867001 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.867011 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.969757 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.969822 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.969847 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.969881 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.969904 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:40Z","lastTransitionTime":"2026-01-20T16:32:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:40 crc kubenswrapper[4995]: I0120 16:32:40.989555 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:40 crc kubenswrapper[4995]: E0120 16:32:40.989693 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.036032 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 06:40:49.147891102 +0000 UTC Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.072212 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.072254 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.072266 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.072286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.072302 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.175983 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.176137 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.176167 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.176197 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.176221 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.278244 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.278516 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.278603 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.278683 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.278778 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.381113 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.381162 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.381174 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.381190 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.381203 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.483768 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.483793 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.483804 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.483818 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.483828 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.586619 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.586663 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.586677 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.586693 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.586704 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.689743 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.690043 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.690172 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.690276 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.690367 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.792735 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.792765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.792776 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.792789 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.792798 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.881840 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.881868 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.881876 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.881888 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.881898 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.898711 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.902911 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.902951 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.902962 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.902978 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.902988 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.919621 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.924306 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.924342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.924353 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.924369 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.924380 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.941640 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.945306 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.945335 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.945346 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.945363 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.945376 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.959309 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.963370 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.963409 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.963419 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.963434 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.963446 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.980144 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:41Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.980262 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.981698 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.981731 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.981740 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.981755 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.981766 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:41Z","lastTransitionTime":"2026-01-20T16:32:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.989333 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.989344 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:41 crc kubenswrapper[4995]: I0120 16:32:41.989432 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.989556 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.989673 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:41 crc kubenswrapper[4995]: E0120 16:32:41.989745 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.002991 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.018054 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.035943 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.036247 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 11:14:58.865263129 +0000 UTC Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.054800 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c72261
42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.072431 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"908a5575-bc94-4308-84c9-7d3440eb907c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5df813f52fd39fd41ac24e380e0bcbc61bba24b8227e4dcf168e9b0bc4db6d3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b4674d68fc4efd41f62da80d01dbd593b50ecab935f87fb39389d7e9d1efcd4\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b4674d68fc4efd41f62da80d01dbd593b50ecab935f87fb39389d7e9d1efcd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.084459 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.084495 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.084524 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.084538 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.084546 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.092125 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.108194 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.124034 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.139634 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.153281 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 
16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.168832 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.186637 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.187651 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.187702 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.187711 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.187729 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.187741 4995 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.198872 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.213875 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.227729 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 
2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.241648 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.251810 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.271986 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a
92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.290570 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.290612 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.290619 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.290634 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.290643 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.291967 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:32Z\\\",\\\"message\\\":\\\"ed: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:31.798412 7035 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2 in node crc\\\\nI0120 16:32:31.798376 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-vj5zz\\\\nI0120 16:32:31.798489 7035 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-pgz94 in node crc\\\\nI0120 16:32:31.798432 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798516 7035 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0120 16:32:31.798526 7035 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI0120 16:32:31.798533 7035 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798304 7035 services_controller.go:451] Built service openshift-conf\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:42Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.392118 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.392193 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.392209 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.392232 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.392255 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.498928 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.498981 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.498995 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.499015 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.499035 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.602438 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.602490 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.602502 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.602521 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.602533 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.704795 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.704856 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.704872 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.704897 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.704915 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.808018 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.808130 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.808400 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.808708 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.808765 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.911717 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.911797 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.911814 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.911840 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.911856 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:42Z","lastTransitionTime":"2026-01-20T16:32:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:42 crc kubenswrapper[4995]: I0120 16:32:42.988767 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:42 crc kubenswrapper[4995]: E0120 16:32:42.988888 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.014550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.014580 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.014588 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.014602 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.014643 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.036900 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 17:17:56.55198864 +0000 UTC Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.118071 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.118159 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.118175 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.118197 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.118213 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.221314 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.221606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.221616 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.221630 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.221640 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.324210 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.324920 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.324952 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.324978 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.324994 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.426943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.426973 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.426980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.426992 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.427003 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.528970 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.529014 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.529023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.529040 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.529049 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.631238 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.631272 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.631282 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.631296 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.631308 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.737222 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.737291 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.737311 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.737333 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.737347 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:43Z","lastTransitionTime":"2026-01-20T16:32:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.874589 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.874704 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.874678334 +0000 UTC m=+146.119283150 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.975197 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.975248 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.975289 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.975334 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975419 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975459 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975462 4995 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975482 4995 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975500 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975527 4995 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975474 4995 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975543 4995 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975547 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.975523422 +0000 UTC m=+146.220128258 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975597 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.975581323 +0000 UTC m=+146.220186149 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975624 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.975616104 +0000 UTC m=+146.220220930 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.975646 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.975638795 +0000 UTC m=+146.220243611 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.989509 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.989509 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.989592 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:43 crc kubenswrapper[4995]: I0120 16:32:43.989934 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.989984 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:43 crc kubenswrapper[4995]: E0120 16:32:43.990149 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.037260 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 19:31:54.396030475 +0000 UTC
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.044071 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.044142 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.044154 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.044206 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.044242 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:44Z","lastTransitionTime":"2026-01-20T16:32:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:44 crc kubenswrapper[4995]: I0120 16:32:44.989483 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:44 crc kubenswrapper[4995]: E0120 16:32:44.989795 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.037465 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 08:05:16.467839026 +0000 UTC
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.080343 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.080432 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.080459 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.080492 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.080516 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:45Z","lastTransitionTime":"2026-01-20T16:32:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.988994 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.989000 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:45 crc kubenswrapper[4995]: E0120 16:32:45.989228 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:45 crc kubenswrapper[4995]: E0120 16:32:45.989354 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:45 crc kubenswrapper[4995]: I0120 16:32:45.989520 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:45 crc kubenswrapper[4995]: E0120 16:32:45.989653 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.015265 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.015345 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.015370 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.015398 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.015420 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:46Z","lastTransitionTime":"2026-01-20T16:32:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.037860 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 15:56:45.909898332 +0000 UTC
Jan 20 16:32:46 crc kubenswrapper[4995]: I0120 16:32:46.989572 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:46 crc kubenswrapper[4995]: E0120 16:32:46.989786 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.038412 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 12:41:39.190759628 +0000 UTC
Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.046409 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.046448 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.046470 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.046494 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.046511 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:47Z","lastTransitionTime":"2026-01-20T16:32:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.869201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.869286 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.869309 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.869330 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.869343 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:47Z","lastTransitionTime":"2026-01-20T16:32:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.971868 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.971932 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.971950 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.972155 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.972217 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:47Z","lastTransitionTime":"2026-01-20T16:32:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.989386 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:47 crc kubenswrapper[4995]: E0120 16:32:47.989551 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.989402 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.989720 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:47 crc kubenswrapper[4995]: E0120 16:32:47.989878 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:47 crc kubenswrapper[4995]: E0120 16:32:47.989971 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:47 crc kubenswrapper[4995]: I0120 16:32:47.993795 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:32:47 crc kubenswrapper[4995]: E0120 16:32:47.994025 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.038863 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 19:50:40.405405907 +0000 UTC Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.074935 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.074973 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.074983 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.074998 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.075009 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.177152 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.177196 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.177207 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.177222 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.177234 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.279269 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.279315 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.279331 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.279350 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.279363 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.381905 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.381962 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.381974 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.381990 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.382030 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.484696 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.484769 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.484780 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.484797 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.484808 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.587792 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.587926 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.587946 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.588260 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.588374 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.690840 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.690892 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.690916 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.690943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.690964 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.793115 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.793183 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.793204 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.793233 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.793252 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.895667 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.895714 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.895729 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.895750 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.895765 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.989380 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:48 crc kubenswrapper[4995]: E0120 16:32:48.989557 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.998052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.998126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.998147 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.998195 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:48 crc kubenswrapper[4995]: I0120 16:32:48.998211 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:48Z","lastTransitionTime":"2026-01-20T16:32:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.039285 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 13:42:06.469132344 +0000 UTC
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.100133 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.100217 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.100240 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.100296 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.100313 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.203664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.203739 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.203763 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.203798 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.203817 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.305980 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.306045 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.306063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.306126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.306152 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.409155 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.409233 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.409253 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.409278 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.409297 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.512505 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.512551 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.512564 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.512580 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.512593 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.615267 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.615346 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.615363 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.615421 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.615435 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.718365 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.718416 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.718429 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.718449 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.718463 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.822260 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.822337 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.822355 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.822381 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.822398 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.925606 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.925649 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.925660 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.925677 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.925691 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:49Z","lastTransitionTime":"2026-01-20T16:32:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.988696 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.988731 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:49 crc kubenswrapper[4995]: E0120 16:32:49.988914 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:49 crc kubenswrapper[4995]: I0120 16:32:49.988936 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:49 crc kubenswrapper[4995]: E0120 16:32:49.989065 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:49 crc kubenswrapper[4995]: E0120 16:32:49.989217 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.027932 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.028012 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.028035 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.028063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.028124 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.040122 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 18:41:43.188639323 +0000 UTC
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.131684 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.131738 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.131760 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.131792 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.131819 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.235269 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.235321 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.235338 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.235364 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.235384 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.338497 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.338543 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.338558 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.338575 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.338586 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.441108 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.441163 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.441180 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.441202 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.441221 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.544208 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.544248 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.544260 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.544276 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.544289 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.646776 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.646815 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.646829 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.646847 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.646861 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.750331 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.750423 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.750449 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.750477 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.750498 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.853642 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.853700 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.853716 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.853738 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.853752 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.956054 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.956157 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.956179 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.956205 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.956222 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:50Z","lastTransitionTime":"2026-01-20T16:32:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:50 crc kubenswrapper[4995]: I0120 16:32:50.988509 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:50 crc kubenswrapper[4995]: E0120 16:32:50.988854 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.041138 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 06:09:19.144029262 +0000 UTC
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.057959 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.057997 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.058005 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.058018 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.058027 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.160052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.160090 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.160099 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.160111 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.160121 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.262303 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.262337 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.262348 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.262365 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.262378 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.364923 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.364965 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.364973 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.364987 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.364998 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.468126 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.468178 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.468194 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.468216 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.468229 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.571597 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.571670 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.571690 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.571716 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.571733 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.674494 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.674533 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.674546 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.674564 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.674587 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.777558 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.777590 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.777600 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.777618 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.777629 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.879599 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.879636 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.879646 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.879662 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.879674 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.982177 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.982211 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.982219 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.982234 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.982244 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:51Z","lastTransitionTime":"2026-01-20T16:32:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.988577 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.988646 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:51 crc kubenswrapper[4995]: E0120 16:32:51.988673 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:51 crc kubenswrapper[4995]: E0120 16:32:51.988784 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:51 crc kubenswrapper[4995]: I0120 16:32:51.988825 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:51 crc kubenswrapper[4995]: E0120 16:32:51.988871 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.009514 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e82420c5-a3ae-43ea-a208-b757794521a6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:32Z\\\",\\\"message\\\":\\\"ed: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:31Z is after 2025-08-24T17:21:41Z]\\\\nI0120 16:32:31.798412 7035 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2 in node crc\\\\nI0120 16:32:31.798376 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-vj5zz\\\\nI0120 16:32:31.798489 7035 ovn.go:134] Ensuring zone local for Pod openshift-dns/node-resolver-pgz94 in node crc\\\\nI0120 16:32:31.798432 7035 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798516 7035 ovn.go:134] Ensuring zone local for Pod openshift-network-node-identity/network-node-identity-vrzqb in node crc\\\\nI0120 16:32:31.798526 7035 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI0120 16:32:31.798533 7035 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nI0120 16:32:31.798304 7035 services_controller.go:451] Built service openshift-conf\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:32:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s 
restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptmxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qp9h9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.015882 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.015934 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.015947 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.015967 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.015980 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.020890 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cda39176b9d1e52e757adcafde00f46a714a1e2e48f2b5c338ad92e4bd334ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ctcsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-ns9m2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: E0120 16:32:52.030247 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.032646 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"908a5575-bc94-4308-84c9-7d3440eb907c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5df813f52fd39fd41ac24e380e0bcbc61bba24b8227e4dcf168e9b0bc4db6d3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b4674d68fc4efd41f62da80d01dbd593b50ecab935f87fb39389d7e9d1efcd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2b4674d68fc4efd41f62da80d01dbd593b50ecab935f87fb39389d7e9d1efcd4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.033971 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.034026 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.034038 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.034055 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.034070 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.042111 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 19:59:36.131173223 +0000 UTC Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.045602 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://75045855d2ce4b3d5289c11da3df40e13379b8d2f1dda4845788df70a3ad9e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: E0120 16:32:52.049182 4995 kubelet_node_status.go:585] "Error updating node status, will retry" 
err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329b
a568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\
\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.053385 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.053430 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.053441 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.053459 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.053473 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.061825 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: E0120 16:32:52.064229 4995 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3d08bfb7-7ebc-480a-a0ba-a2a49fa5a5f4\\\",\\\"systemUUID\\\":\\\"9
bcd4f67-9f17-42a8-8b89-21971da88d3d\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.067323 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.067355 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.067367 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.067383 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.067396 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.078806 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.083834 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.083883 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.083900 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.083921 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.083939 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.097155 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a0e5bec5-c9a4-46b0-87c1-5eea75de723e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a26fb1e4edac56ab0cf29241aa7383365416c1587f476e22a393e014996f591c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":
\\\"cri-o://e8836e4efa6a8364bdffeb5b7ec68cdbe5aa325df1af4bb1bc8087750cec5068\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a9b88069cda87e58e52bd2b6d5cc3c1ab7a76822576fddc6e595d096e612d64\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5c64575e35bf4620acfd17eacba71dadd61eea10e168a467c23b2813038c188\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee
7c174022753feb02bb6f3452034d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03b0c947170cfd2f4b4d87d9527aba0c9ee7c174022753feb02bb6f3452034d4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e3f0edcc2893fb873c84c4d8c04ef16b0a4da53ceb740a4906b4cb0a14693ee1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://94a3ab412d9726f23027eb79b1155659223a7d6f75bfb1f0b09806d8d32ade21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:48Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernet
es.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7jq6l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-vj5zz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: E0120 16:32:52.101276 4995 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.104716 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.104758 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.104774 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.104796 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.104811 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.112124 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b40e213e-290e-403c-a77a-065638455b73\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc39a0b91467e2a43eca3c52144b4967fc667054f6069de8e4a165b16dffa816\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a1f2f3cad82baa9489dd9e399f766bf6b80ba16dd5a8b50d82ca1e3d2d2c53b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2a
f0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnhpk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-75tq2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.124742 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f24f7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:54Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kbdtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.142195 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3574abb5-159d-4e7c-b894-7233c1798084\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"message\\\":\\\"4.388974 1 observer_polling.go:159] Starting file observer\\\\nW0120 16:31:34.393340 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0120 16:31:34.393617 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0120 16:31:34.395394 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3805501027/tls.crt::/tmp/serving-cert-3805501027/tls.key\\\\\\\"\\\\nI0120 16:31:39.953273 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0120 16:31:39.957812 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0120 16:31:39.957839 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0120 16:31:39.957859 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0120 16:31:39.957877 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0120 16:31:39.966514 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0120 16:31:39.966563 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966580 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0120 16:31:39.966586 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0120 16:31:39.966591 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0120 16:31:39.966599 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0120 16:31:39.966604 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0120 16:31:39.966640 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.158642 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89bd2693d48a04e71a6a401e653e68ac2c10e6998ce22ea9d173c6ec60555221\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65918ad0d2f561b27f0254c0b260ad1fe5cda67993eb01dbf559253b8dd1f8f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.169304 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:39Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.178055 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pgz94" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32295b75-631b-4da8-9396-c942306f0d57\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://81992e531be18d2135a1511bdee9ffeffcfb632900832932e87e0cef8608d698\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d87w5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:40Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pgz94\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.191668 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-vlvwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5008a882-4540-4ebe-8a27-53f0de0cbd4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T16:32:27Z\\\",\\\"message\\\":\\\"2026-01-20T16:31:42+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49\\\\n2026-01-20T16:31:42+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3f8454f2-87ac-487a-92d6-ee4415820c49 to /host/opt/cni/bin/\\\\n2026-01-20T16:31:42Z [verbose] multus-daemon started\\\\n2026-01-20T16:31:42Z [verbose] Readiness Indicator file check\\\\n2026-01-20T16:32:27Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:41Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:32:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-z87zh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-vlvwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.203038 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-hqgw4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd1c7a41-f7a9-462f-b9c3-480e0715d465\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c9b50d0e01b91bd6c7a6988da779757b20bb7bbabdd9a9760f19d4fc323db07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-chdms\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:42Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-hqgw4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.206971 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.207131 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.207208 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.207288 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.207373 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.233228 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"30f850bf-fe8f-479d-95a7-b8efe4331250\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a8749885fdf196363e5840500ce2e38f6fb36979c84740970a868d1972101f80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d4592c9b2e720ad71213c6b8fe3a2b065e9dc505d869336ead1ab46fa0eb5fda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa75b8cdd9ad5f96529d908ce6a599e06c6e715c12adbef4a3f099a9c0ef6f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e8cad67b6885d9df623c43fe4c3c5b3762854a92a5685e915e9921d44f937ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cdbc5992e300acf7bc89f713971153b44e9be59197707f082d5f0e6b5dcfa6fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c4c252c0b5d817f2a66a14e350b6fbc9d0a0cdf236e1c922f24f527a3375dc3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://e3a918332e46b4eff64a5523f955e80ed09682cb278eecb2bd657542731246eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b83f50f36824a598390dd895432d728c1e90b0a962fdd922ed6608f44e3e6239\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.252823 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8a1c0106-323d-4a96-a3fb-20c3867524f6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e303aa3c3b23147acec28dbb2de8de757267794cee2d34886480771fc95c4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://768259b01b64accca18ad29fe183e41d2dfee7ffdd95acd0ab8394ed2d905c21\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8523f8bcbe311ced22def0bfdd65abedc72de719c10f0827c3d07199d5fe97bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.269641 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69551341-4932-43de-aba2-fe6543754b3a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:32:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://96ab092084d25c32d5bb06e257345e3f9e16c205b018195c59f37408d19ac141\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c15d34020010f509f7207e4fa61d33feed172d85511ec54b8baf14c45e77ed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://093e94aa8b0dba1ae5aab9386630b0585800e31d9010426cfd934feb98882c54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62e17d78d1c51bf1f814c6fe5aec0dc1c9d82197ece14a392903871056c84ffe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T16:31:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T16:31:22Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T16:31:22Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.288403 4995 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T16:31:41Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://91354372bb56d1202cdd77f8af86fadcea01be9064b601ddf5388c6308abdeed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T16:31:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T16:32:52Z is after 2025-08-24T17:21:41Z" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.310652 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.310786 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.310807 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.310831 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.310883 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.413670 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.413737 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.413758 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.413779 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.413793 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.516686 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.517020 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.517040 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.517065 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.517119 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.619176 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.619209 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.619220 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.619237 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.619249 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.722430 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.722637 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.722765 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.722802 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.722827 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.825491 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.825550 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.825564 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.825580 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.825593 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.928060 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.928191 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.928211 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.928282 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.928318 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:52Z","lastTransitionTime":"2026-01-20T16:32:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 16:32:52 crc kubenswrapper[4995]: I0120 16:32:52.989528 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:52 crc kubenswrapper[4995]: E0120 16:32:52.989802 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.030591 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.030652 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.030671 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.030695 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.030715 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:53Z","lastTransitionTime":"2026-01-20T16:32:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.042999 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 21:41:33.122885782 +0000 UTC
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.988653 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.988691 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:53 crc kubenswrapper[4995]: E0120 16:32:53.988900 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:53 crc kubenswrapper[4995]: I0120 16:32:53.988930 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:53 crc kubenswrapper[4995]: E0120 16:32:53.989547 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:53 crc kubenswrapper[4995]: E0120 16:32:53.989762 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.043293 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 02:36:22.278532747 +0000 UTC
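
The root cause named in every record above is a single failed probe: no *.conf, *.conflist, or *.json file exists yet under /etc/kubernetes/cni/net.d/, so the network plugin reports NetworkPluginNotReady and the kubelet keeps the node NotReady. A self-contained Go sketch of that style of directory scan (an approximation of libcni's config discovery, not the kubelet's exact code; the accepted extensions are assumed from libcni's defaults):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// confFiles approximates the check whose failure produces
// "no CNI configuration file in /etc/kubernetes/cni/net.d/".
func confFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var found []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions assumed from libcni defaults
			found = append(found, filepath.Join(dir, e.Name()))
		}
	}
	return found, nil
}

func main() {
	files, err := confFiles("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	if len(files) == 0 {
		fmt.Println("no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?")
		return
	}
	fmt.Println("CNI config candidates:", files)
}

Once the network operator drops a config file into that directory, a scan like this succeeds, NetworkReady flips to true, and the NotReady heartbeats below stop.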
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.167263 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.167335 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.167353 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.167376 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.167394 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:54Z","lastTransitionTime":"2026-01-20T16:32:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:54 crc kubenswrapper[4995]: I0120 16:32:54.988958 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:54 crc kubenswrapper[4995]: E0120 16:32:54.989189 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.043932 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 00:50:35.398336618 +0000 UTC
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.201262 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.201305 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.201321 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.201342 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.201358 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:55Z","lastTransitionTime":"2026-01-20T16:32:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.988873 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.988937 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:55 crc kubenswrapper[4995]: I0120 16:32:55.989058 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:32:55 crc kubenswrapper[4995]: E0120 16:32:55.989068 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:55 crc kubenswrapper[4995]: E0120 16:32:55.989226 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 16:32:55 crc kubenswrapper[4995]: E0120 16:32:55.989323 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.044162 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 02:23:30.892294863 +0000 UTC
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.134883 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.134968 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.134986 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.135015 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.135032 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:56Z","lastTransitionTime":"2026-01-20T16:32:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:56 crc kubenswrapper[4995]: I0120 16:32:56.988934 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:32:56 crc kubenswrapper[4995]: E0120 16:32:56.989365 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.044325 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 05:49:21.9495065 +0000 UTC
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.167657 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.168043 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.168261 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.168439 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.168625 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:57Z","lastTransitionTime":"2026-01-20T16:32:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.989727 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:32:57 crc kubenswrapper[4995]: E0120 16:32:57.989949 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.989758 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.990021 4995 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:57 crc kubenswrapper[4995]: E0120 16:32:57.990144 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:32:57 crc kubenswrapper[4995]: E0120 16:32:57.990265 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.996636 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.996697 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.996719 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.996746 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:57 crc kubenswrapper[4995]: I0120 16:32:57.996773 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:57Z","lastTransitionTime":"2026-01-20T16:32:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.045734 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 01:33:13.940788405 +0000 UTC Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.099138 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.099177 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.099189 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.099206 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.099219 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.202611 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.202738 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.202770 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.202801 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.202824 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.307011 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.307154 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.307186 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.307218 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.307244 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.410823 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.410897 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.410920 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.410952 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.410976 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.514297 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.514376 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.514400 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.514432 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.514456 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.617416 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.617555 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.617579 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.617603 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.617621 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.720736 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.720815 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.720839 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.720873 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.720898 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.823943 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.824012 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.824035 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.824063 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.824118 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.832062 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:58 crc kubenswrapper[4995]: E0120 16:32:58.832260 4995 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:32:58 crc kubenswrapper[4995]: E0120 16:32:58.832330 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs podName:9dfc8bb5-28e8-4ba3-8009-09d5585a1a12 nodeName:}" failed. No retries permitted until 2026-01-20 16:34:02.832307985 +0000 UTC m=+161.076912821 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs") pod "network-metrics-daemon-kbdtf" (UID: "9dfc8bb5-28e8-4ba3-8009-09d5585a1a12") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.927666 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.927832 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.927926 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.928020 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.928136 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:58Z","lastTransitionTime":"2026-01-20T16:32:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:58 crc kubenswrapper[4995]: I0120 16:32:58.989296 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:32:58 crc kubenswrapper[4995]: E0120 16:32:58.989487 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.031234 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.031283 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.031298 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.031316 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.031327 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.046833 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 15:59:47.44851919 +0000 UTC Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.134862 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.134907 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.134918 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.134935 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.134948 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.237881 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.237979 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.237996 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.238021 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.238038 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.340970 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.341023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.341039 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.341062 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.341106 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.443783 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.443833 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.443853 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.443876 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.443892 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.548756 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.548853 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.548879 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.548912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.548944 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.651560 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.651632 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.651705 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.651730 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.651747 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.755966 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.756033 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.756051 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.756113 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.756132 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.859208 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.859271 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.859287 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.859312 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.859329 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.962161 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.962194 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.962206 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.962221 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.962231 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:32:59Z","lastTransitionTime":"2026-01-20T16:32:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.988943 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.989008 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:32:59 crc kubenswrapper[4995]: E0120 16:32:59.989060 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:32:59 crc kubenswrapper[4995]: I0120 16:32:59.988943 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:32:59 crc kubenswrapper[4995]: E0120 16:32:59.989164 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:32:59 crc kubenswrapper[4995]: E0120 16:32:59.989354 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.046959 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 02:32:36.836857569 +0000 UTC Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.064744 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.064778 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.064793 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.064814 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.064830 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.168152 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.168201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.168213 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.168231 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.168246 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.271842 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.271912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.271929 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.271952 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.271969 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.374844 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.374892 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.374907 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.374927 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.374945 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.477912 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.477982 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.478024 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.478062 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.478133 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.581484 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.581563 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.581587 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.581616 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.581639 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.684464 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.684575 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.684600 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.684629 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.684652 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.786844 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.786910 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.786936 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.786960 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.786978 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.889723 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.889814 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.889833 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.889864 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.889900 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.989418 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:00 crc kubenswrapper[4995]: E0120 16:33:00.989657 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.990728 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:33:00 crc kubenswrapper[4995]: E0120 16:33:00.990996 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qp9h9_openshift-ovn-kubernetes(e82420c5-a3ae-43ea-a208-b757794521a6)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.992613 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.992685 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.992711 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.992734 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:00 crc kubenswrapper[4995]: I0120 16:33:00.992750 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:00Z","lastTransitionTime":"2026-01-20T16:33:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.047716 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 18:13:47.662196134 +0000 UTC Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.095255 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.095325 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.095347 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.095374 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.095396 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.198906 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.198956 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.198973 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.198995 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.199014 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.301942 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.302023 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.302047 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.302072 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.302129 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.405176 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.405276 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.405300 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.405331 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.405353 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.508412 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.508473 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.508490 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.508513 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.508533 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.611521 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.611578 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.611594 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.611614 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.611632 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.715132 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.715194 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.715216 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.715243 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.715263 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.818571 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.818625 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.818641 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.818664 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.818683 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.922052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.922159 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.922176 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.922201 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.922220 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:01Z","lastTransitionTime":"2026-01-20T16:33:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.989458 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.989670 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:01 crc kubenswrapper[4995]: I0120 16:33:01.989907 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:01 crc kubenswrapper[4995]: E0120 16:33:01.989897 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:01 crc kubenswrapper[4995]: E0120 16:33:01.990187 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:01 crc kubenswrapper[4995]: E0120 16:33:01.990365 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.007347 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=26.007318986 podStartE2EDuration="26.007318986s" podCreationTimestamp="2026-01-20 16:32:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.007275884 +0000 UTC m=+100.251880760" watchObservedRunningTime="2026-01-20 16:33:02.007318986 +0000 UTC m=+100.251923832" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.026743 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.027052 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.027151 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.027255 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.027329 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:02Z","lastTransitionTime":"2026-01-20T16:33:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.047992 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 00:51:32.007638887 +0000 UTC Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.104976 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podStartSLOduration=82.104955994 podStartE2EDuration="1m22.104955994s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.08654847 +0000 UTC m=+100.331153286" watchObservedRunningTime="2026-01-20 16:33:02.104955994 +0000 UTC m=+100.349560810" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.120797 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-75tq2" podStartSLOduration=81.120777697 podStartE2EDuration="1m21.120777697s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.105263483 +0000 UTC m=+100.349868369" watchObservedRunningTime="2026-01-20 16:33:02.120777697 +0000 UTC m=+100.365382513" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.130290 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.130372 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.130397 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.130429 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.130453 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:02Z","lastTransitionTime":"2026-01-20T16:33:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.137768 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=82.137749521 podStartE2EDuration="1m22.137749521s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.137503624 +0000 UTC m=+100.382108450" watchObservedRunningTime="2026-01-20 16:33:02.137749521 +0000 UTC m=+100.382354337" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.195507 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-pgz94" podStartSLOduration=83.195481774 podStartE2EDuration="1m23.195481774s" podCreationTimestamp="2026-01-20 16:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.176785332 +0000 UTC m=+100.421390178" watchObservedRunningTime="2026-01-20 16:33:02.195481774 +0000 UTC m=+100.440086610" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.196131 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-vj5zz" podStartSLOduration=82.196120042 podStartE2EDuration="1m22.196120042s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.194703272 +0000 UTC m=+100.439308088" watchObservedRunningTime="2026-01-20 16:33:02.196120042 +0000 UTC m=+100.440724878" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.233534 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.233579 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.233590 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.233608 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.233620 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:02Z","lastTransitionTime":"2026-01-20T16:33:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.235869 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=80.235844782 podStartE2EDuration="1m20.235844782s" podCreationTimestamp="2026-01-20 16:31:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.234426962 +0000 UTC m=+100.479031778" watchObservedRunningTime="2026-01-20 16:33:02.235844782 +0000 UTC m=+100.480449608" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.236456 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-hqgw4" podStartSLOduration=82.236447849 podStartE2EDuration="1m22.236447849s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.207212752 +0000 UTC m=+100.451817578" watchObservedRunningTime="2026-01-20 16:33:02.236447849 +0000 UTC m=+100.481052675" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.254810 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=78.254790882 podStartE2EDuration="1m18.254790882s" podCreationTimestamp="2026-01-20 16:31:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.253853645 +0000 UTC m=+100.498458481" watchObservedRunningTime="2026-01-20 16:33:02.254790882 +0000 UTC m=+100.499395728" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.274044 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=52.274027029 podStartE2EDuration="52.274027029s" podCreationTimestamp="2026-01-20 16:32:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.273152054 +0000 UTC m=+100.517756950" watchObservedRunningTime="2026-01-20 16:33:02.274027029 +0000 UTC m=+100.518631845" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.305705 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-vlvwg" podStartSLOduration=82.305688704 podStartE2EDuration="1m22.305688704s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:02.305356214 +0000 UTC m=+100.549961051" watchObservedRunningTime="2026-01-20 16:33:02.305688704 +0000 UTC m=+100.550293520" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.335951 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.335993 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.336006 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.336022 4995 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeNotReady" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.336034 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:02Z","lastTransitionTime":"2026-01-20T16:33:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.439704 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.439755 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.439767 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.439785 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.439796 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:02Z","lastTransitionTime":"2026-01-20T16:33:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.463219 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.463246 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.463255 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.463266 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.463275 4995 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T16:33:02Z","lastTransitionTime":"2026-01-20T16:33:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.521487 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr"] Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.521858 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.525464 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.525489 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.525494 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.527141 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.575783 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6cef6725-e6d8-4536-8fc4-7f8b92154bee-service-ca\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.575865 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/6cef6725-e6d8-4536-8fc4-7f8b92154bee-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.575903 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/6cef6725-e6d8-4536-8fc4-7f8b92154bee-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.576139 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cef6725-e6d8-4536-8fc4-7f8b92154bee-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.576203 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6cef6725-e6d8-4536-8fc4-7f8b92154bee-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.677639 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6cef6725-e6d8-4536-8fc4-7f8b92154bee-service-ca\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc 
kubenswrapper[4995]: I0120 16:33:02.677703 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/6cef6725-e6d8-4536-8fc4-7f8b92154bee-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.677753 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/6cef6725-e6d8-4536-8fc4-7f8b92154bee-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.677829 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cef6725-e6d8-4536-8fc4-7f8b92154bee-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.677863 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6cef6725-e6d8-4536-8fc4-7f8b92154bee-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.677937 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/6cef6725-e6d8-4536-8fc4-7f8b92154bee-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.677980 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/6cef6725-e6d8-4536-8fc4-7f8b92154bee-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.679407 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6cef6725-e6d8-4536-8fc4-7f8b92154bee-service-ca\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.687466 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cef6725-e6d8-4536-8fc4-7f8b92154bee-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.708460 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6cef6725-e6d8-4536-8fc4-7f8b92154bee-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-nv4pr\" (UID: \"6cef6725-e6d8-4536-8fc4-7f8b92154bee\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.847198 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" Jan 20 16:33:02 crc kubenswrapper[4995]: I0120 16:33:02.989461 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:02 crc kubenswrapper[4995]: E0120 16:33:02.989615 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.049144 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 18:36:28.53083445 +0000 UTC Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.049247 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.057337 4995 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.498851 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" event={"ID":"6cef6725-e6d8-4536-8fc4-7f8b92154bee","Type":"ContainerStarted","Data":"9d9bd5e1ba9867959b1fa85e49014e05466851410cbe97ab0beceb6acb1e97ff"} Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.499238 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" event={"ID":"6cef6725-e6d8-4536-8fc4-7f8b92154bee","Type":"ContainerStarted","Data":"fdfcd6f810a11fe6c5b595fd1c1f53856a0284be9783db8cecace79870824877"} Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.519261 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-nv4pr" podStartSLOduration=83.519233754 podStartE2EDuration="1m23.519233754s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:03.518756111 +0000 UTC m=+101.763360947" watchObservedRunningTime="2026-01-20 16:33:03.519233754 +0000 UTC m=+101.763838590" Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.988654 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:03 crc kubenswrapper[4995]: E0120 16:33:03.988930 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.988994 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:03 crc kubenswrapper[4995]: I0120 16:33:03.989191 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:03 crc kubenswrapper[4995]: E0120 16:33:03.989444 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:03 crc kubenswrapper[4995]: E0120 16:33:03.989700 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:04 crc kubenswrapper[4995]: I0120 16:33:04.989249 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:04 crc kubenswrapper[4995]: E0120 16:33:04.989534 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:05 crc kubenswrapper[4995]: I0120 16:33:05.988755 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:05 crc kubenswrapper[4995]: I0120 16:33:05.988803 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:05 crc kubenswrapper[4995]: E0120 16:33:05.989477 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:05 crc kubenswrapper[4995]: I0120 16:33:05.988859 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:05 crc kubenswrapper[4995]: E0120 16:33:05.989611 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:05 crc kubenswrapper[4995]: E0120 16:33:05.989687 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:06 crc kubenswrapper[4995]: I0120 16:33:06.989201 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:06 crc kubenswrapper[4995]: E0120 16:33:06.989715 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:07 crc kubenswrapper[4995]: I0120 16:33:07.988665 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:07 crc kubenswrapper[4995]: I0120 16:33:07.988733 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:07 crc kubenswrapper[4995]: E0120 16:33:07.988901 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:07 crc kubenswrapper[4995]: E0120 16:33:07.989071 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:07 crc kubenswrapper[4995]: I0120 16:33:07.990175 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:07 crc kubenswrapper[4995]: E0120 16:33:07.990732 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:08 crc kubenswrapper[4995]: I0120 16:33:08.988959 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:08 crc kubenswrapper[4995]: E0120 16:33:08.989201 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:09 crc kubenswrapper[4995]: I0120 16:33:09.988842 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:09 crc kubenswrapper[4995]: E0120 16:33:09.989134 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:09 crc kubenswrapper[4995]: I0120 16:33:09.989176 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:09 crc kubenswrapper[4995]: E0120 16:33:09.989398 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:09 crc kubenswrapper[4995]: I0120 16:33:09.989502 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:09 crc kubenswrapper[4995]: E0120 16:33:09.989665 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:10 crc kubenswrapper[4995]: I0120 16:33:10.988586 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:10 crc kubenswrapper[4995]: E0120 16:33:10.989143 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:11 crc kubenswrapper[4995]: I0120 16:33:11.989138 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:11 crc kubenswrapper[4995]: I0120 16:33:11.989309 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:11 crc kubenswrapper[4995]: E0120 16:33:11.989463 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:11 crc kubenswrapper[4995]: I0120 16:33:11.989507 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:11 crc kubenswrapper[4995]: E0120 16:33:11.992462 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:11 crc kubenswrapper[4995]: E0120 16:33:11.992849 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:12 crc kubenswrapper[4995]: I0120 16:33:12.989399 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:12 crc kubenswrapper[4995]: I0120 16:33:12.989955 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:33:12 crc kubenswrapper[4995]: E0120 16:33:12.990266 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.535313 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/3.log" Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.538159 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerStarted","Data":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.538546 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.567344 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podStartSLOduration=92.567313673 podStartE2EDuration="1m32.567313673s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:13.566120879 +0000 UTC m=+111.810725675" watchObservedRunningTime="2026-01-20 16:33:13.567313673 +0000 UTC m=+111.811918479" Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.989494 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.989566 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:13 crc kubenswrapper[4995]: I0120 16:33:13.989632 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:13 crc kubenswrapper[4995]: E0120 16:33:13.990208 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:13 crc kubenswrapper[4995]: E0120 16:33:13.990477 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:13 crc kubenswrapper[4995]: E0120 16:33:13.990570 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.119518 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-kbdtf"] Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.119654 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:14 crc kubenswrapper[4995]: E0120 16:33:14.119826 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.542719 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/1.log" Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.543148 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/0.log" Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.543187 4995 generic.go:334] "Generic (PLEG): container finished" podID="5008a882-4540-4ebe-8a27-53f0de0cbd4a" containerID="1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842" exitCode=1 Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.544041 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerDied","Data":"1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842"} Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.544117 4995 scope.go:117] "RemoveContainer" containerID="f64e20e894413cab6a3cc99318441bfd5b0f3f96f19f24121271785e9e0273b1" Jan 20 16:33:14 crc kubenswrapper[4995]: I0120 16:33:14.544493 4995 scope.go:117] "RemoveContainer" containerID="1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842" Jan 20 16:33:14 crc kubenswrapper[4995]: E0120 16:33:14.544680 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-vlvwg_openshift-multus(5008a882-4540-4ebe-8a27-53f0de0cbd4a)\"" pod="openshift-multus/multus-vlvwg" podUID="5008a882-4540-4ebe-8a27-53f0de0cbd4a" Jan 20 16:33:15 crc kubenswrapper[4995]: I0120 16:33:15.549549 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/1.log" Jan 20 16:33:15 crc kubenswrapper[4995]: I0120 16:33:15.989124 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:15 crc kubenswrapper[4995]: I0120 16:33:15.989189 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:15 crc kubenswrapper[4995]: I0120 16:33:15.989238 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:15 crc kubenswrapper[4995]: I0120 16:33:15.989385 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:15 crc kubenswrapper[4995]: E0120 16:33:15.989368 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:15 crc kubenswrapper[4995]: E0120 16:33:15.989526 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:15 crc kubenswrapper[4995]: E0120 16:33:15.989626 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:15 crc kubenswrapper[4995]: E0120 16:33:15.989753 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:17 crc kubenswrapper[4995]: I0120 16:33:17.989165 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:17 crc kubenswrapper[4995]: I0120 16:33:17.989312 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:17 crc kubenswrapper[4995]: I0120 16:33:17.989304 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:17 crc kubenswrapper[4995]: I0120 16:33:17.989233 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:17 crc kubenswrapper[4995]: E0120 16:33:17.989424 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:17 crc kubenswrapper[4995]: E0120 16:33:17.989569 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:17 crc kubenswrapper[4995]: E0120 16:33:17.989749 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:17 crc kubenswrapper[4995]: E0120 16:33:17.989968 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:19 crc kubenswrapper[4995]: I0120 16:33:19.988614 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:19 crc kubenswrapper[4995]: I0120 16:33:19.988667 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:19 crc kubenswrapper[4995]: I0120 16:33:19.988681 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:19 crc kubenswrapper[4995]: E0120 16:33:19.988711 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:19 crc kubenswrapper[4995]: E0120 16:33:19.988801 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:19 crc kubenswrapper[4995]: E0120 16:33:19.988925 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:19 crc kubenswrapper[4995]: I0120 16:33:19.988989 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:19 crc kubenswrapper[4995]: E0120 16:33:19.989125 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:21 crc kubenswrapper[4995]: E0120 16:33:21.987264 4995 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 20 16:33:21 crc kubenswrapper[4995]: I0120 16:33:21.988639 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:21 crc kubenswrapper[4995]: I0120 16:33:21.988646 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:21 crc kubenswrapper[4995]: I0120 16:33:21.988718 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:21 crc kubenswrapper[4995]: I0120 16:33:21.990918 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:21 crc kubenswrapper[4995]: E0120 16:33:21.990842 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:21 crc kubenswrapper[4995]: E0120 16:33:21.991115 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:21 crc kubenswrapper[4995]: E0120 16:33:21.991259 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:21 crc kubenswrapper[4995]: E0120 16:33:21.991382 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:22 crc kubenswrapper[4995]: E0120 16:33:22.077717 4995 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 20 16:33:22 crc kubenswrapper[4995]: I0120 16:33:22.401424 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:33:23 crc kubenswrapper[4995]: I0120 16:33:23.989317 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:23 crc kubenswrapper[4995]: I0120 16:33:23.989388 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:23 crc kubenswrapper[4995]: I0120 16:33:23.989414 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:23 crc kubenswrapper[4995]: E0120 16:33:23.989561 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:23 crc kubenswrapper[4995]: I0120 16:33:23.989595 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:23 crc kubenswrapper[4995]: E0120 16:33:23.989756 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:23 crc kubenswrapper[4995]: E0120 16:33:23.989930 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:23 crc kubenswrapper[4995]: E0120 16:33:23.990045 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:25 crc kubenswrapper[4995]: I0120 16:33:25.989676 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:25 crc kubenswrapper[4995]: E0120 16:33:25.989810 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:25 crc kubenswrapper[4995]: I0120 16:33:25.989926 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:25 crc kubenswrapper[4995]: I0120 16:33:25.989928 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:25 crc kubenswrapper[4995]: I0120 16:33:25.989947 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:25 crc kubenswrapper[4995]: E0120 16:33:25.990388 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:25 crc kubenswrapper[4995]: E0120 16:33:25.990528 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:25 crc kubenswrapper[4995]: E0120 16:33:25.990642 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:27 crc kubenswrapper[4995]: E0120 16:33:27.078961 4995 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 20 16:33:27 crc kubenswrapper[4995]: I0120 16:33:27.988641 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:27 crc kubenswrapper[4995]: I0120 16:33:27.988726 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:27 crc kubenswrapper[4995]: I0120 16:33:27.988664 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:27 crc kubenswrapper[4995]: I0120 16:33:27.988656 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:27 crc kubenswrapper[4995]: E0120 16:33:27.988860 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:27 crc kubenswrapper[4995]: E0120 16:33:27.988960 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:27 crc kubenswrapper[4995]: E0120 16:33:27.989258 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:27 crc kubenswrapper[4995]: E0120 16:33:27.989377 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:29 crc kubenswrapper[4995]: I0120 16:33:29.989555 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:29 crc kubenswrapper[4995]: E0120 16:33:29.989728 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:29 crc kubenswrapper[4995]: I0120 16:33:29.990382 4995 scope.go:117] "RemoveContainer" containerID="1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842" Jan 20 16:33:29 crc kubenswrapper[4995]: I0120 16:33:29.990440 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:29 crc kubenswrapper[4995]: I0120 16:33:29.990532 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:29 crc kubenswrapper[4995]: E0120 16:33:29.990624 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:29 crc kubenswrapper[4995]: E0120 16:33:29.991059 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:29 crc kubenswrapper[4995]: I0120 16:33:29.991212 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:29 crc kubenswrapper[4995]: E0120 16:33:29.991389 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:30 crc kubenswrapper[4995]: I0120 16:33:30.601901 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/1.log" Jan 20 16:33:30 crc kubenswrapper[4995]: I0120 16:33:30.602173 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerStarted","Data":"ae12658acf3b63bc36cb1271992b7137508cfafa7404490f7b7e5544d8dd1545"} Jan 20 16:33:31 crc kubenswrapper[4995]: I0120 16:33:31.989353 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:31 crc kubenswrapper[4995]: I0120 16:33:31.989449 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:31 crc kubenswrapper[4995]: I0120 16:33:31.989478 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:31 crc kubenswrapper[4995]: I0120 16:33:31.989289 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:31 crc kubenswrapper[4995]: E0120 16:33:31.990112 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:31 crc kubenswrapper[4995]: E0120 16:33:31.990314 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:31 crc kubenswrapper[4995]: E0120 16:33:31.990336 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:31 crc kubenswrapper[4995]: E0120 16:33:31.990377 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:32 crc kubenswrapper[4995]: E0120 16:33:32.080298 4995 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 20 16:33:33 crc kubenswrapper[4995]: I0120 16:33:33.989370 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:33 crc kubenswrapper[4995]: E0120 16:33:33.989599 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:33 crc kubenswrapper[4995]: I0120 16:33:33.989975 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:33 crc kubenswrapper[4995]: I0120 16:33:33.990051 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:33 crc kubenswrapper[4995]: E0120 16:33:33.990145 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:33 crc kubenswrapper[4995]: E0120 16:33:33.990305 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:33 crc kubenswrapper[4995]: I0120 16:33:33.990416 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:33 crc kubenswrapper[4995]: E0120 16:33:33.990677 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:35 crc kubenswrapper[4995]: I0120 16:33:35.989468 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:35 crc kubenswrapper[4995]: I0120 16:33:35.989471 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:35 crc kubenswrapper[4995]: E0120 16:33:35.989648 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 16:33:35 crc kubenswrapper[4995]: I0120 16:33:35.989711 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:35 crc kubenswrapper[4995]: E0120 16:33:35.989826 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kbdtf" podUID="9dfc8bb5-28e8-4ba3-8009-09d5585a1a12" Jan 20 16:33:35 crc kubenswrapper[4995]: I0120 16:33:35.990050 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:35 crc kubenswrapper[4995]: E0120 16:33:35.990117 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 16:33:35 crc kubenswrapper[4995]: E0120 16:33:35.990201 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.989625 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.989625 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.989642 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.990157 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.992864 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.993143 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.993183 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.994013 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.994884 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 20 16:33:37 crc kubenswrapper[4995]: I0120 16:33:37.994899 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.620526 4995 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.665323 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.665797 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.666338 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-92c94"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.666760 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.667434 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9z4p9"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.667833 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.668583 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.668886 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.669482 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rfjfc"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.669793 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.670503 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-l4kss"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.671050 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.675331 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.676424 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.676548 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.676915 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.677038 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.677224 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.677349 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.677953 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.677968 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.678172 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 20 16:33:43 crc 
kubenswrapper[4995]: I0120 16:33:43.678405 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.678786 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.690778 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.691352 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.691395 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.691410 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.691352 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.691716 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.692113 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.692205 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.693273 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.693874 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.701562 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.701778 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.702218 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.702542 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.702638 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.702821 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.702924 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703053 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703278 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703289 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703369 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703394 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703413 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.703454 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.705129 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.705691 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.706144 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-7wptg"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.706720 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.707546 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.707941 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.709681 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-jzs8c"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.710093 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.719033 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.719655 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.736331 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738755 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnw4z\" (UniqueName: \"kubernetes.io/projected/156a6fa0-bc96-492d-88d9-6c4873dff771-kube-api-access-lnw4z\") pod \"downloads-7954f5f757-92c94\" (UID: \"156a6fa0-bc96-492d-88d9-6c4873dff771\") " pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738830 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d07c6d80-c801-48e5-ae4c-89c80c203585-serving-cert\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738867 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-service-ca\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738893 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738923 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg6g8\" (UniqueName: \"kubernetes.io/projected/b88fed33-25aa-4de1-9a11-ddd5dc808758-kube-api-access-pg6g8\") pod \"etcd-operator-b45778765-9z4p9\" 
(UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738946 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-config\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738970 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-config\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.738990 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739025 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2585d1e6-a851-4ecc-8acd-8fd3d2426576-images\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739054 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e95f8d-9e90-4c7c-8030-05f41539b15c-serving-cert\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739116 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-service-ca-bundle\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739137 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-client\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739161 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/2585d1e6-a851-4ecc-8acd-8fd3d2426576-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: 
\"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739206 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/00e95f8d-9e90-4c7c-8030-05f41539b15c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739231 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdp6v\" (UniqueName: \"kubernetes.io/projected/2585d1e6-a851-4ecc-8acd-8fd3d2426576-kube-api-access-zdp6v\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739253 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/620b0366-f02c-4d94-aceb-a0ee38453cdb-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739289 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdqmg\" (UniqueName: \"kubernetes.io/projected/d07c6d80-c801-48e5-ae4c-89c80c203585-kube-api-access-bdqmg\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739316 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-ca\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739344 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b88fed33-25aa-4de1-9a11-ddd5dc808758-serving-cert\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739370 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739397 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx6kd\" (UniqueName: \"kubernetes.io/projected/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-kube-api-access-jx6kd\") pod 
\"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739431 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/620b0366-f02c-4d94-aceb-a0ee38453cdb-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739455 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2585d1e6-a851-4ecc-8acd-8fd3d2426576-config\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739481 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdvvd\" (UniqueName: \"kubernetes.io/projected/00e95f8d-9e90-4c7c-8030-05f41539b15c-kube-api-access-mdvvd\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739506 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.739532 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/620b0366-f02c-4d94-aceb-a0ee38453cdb-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.749947 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.750439 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.777616 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.777942 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.778010 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.778102 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.778121 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.778010 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.778229 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.778289 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.779807 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.780000 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.782797 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-55g56"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.783587 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t456c"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.783820 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-8dc2q"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.784172 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.784887 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.785574 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.786718 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.787110 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.787351 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.787881 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.788379 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.796068 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.796204 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.796341 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.796429 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6wxvh"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.797253 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.797817 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.798653 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.798788 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.798895 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.799411 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.799509 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.799541 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.799708 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.800058 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.800737 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.801581 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.803885 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.804174 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.804279 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.804428 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.804209 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.804520 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.808771 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.810120 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.810384 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.811116 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.811348 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.811541 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.811772 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.811802 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.812104 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.812110 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.812575 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.812732 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 20 
16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.812889 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.817599 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.818429 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.818948 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.819740 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.819862 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.819988 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.820069 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.820310 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.820432 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.820515 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.820683 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.820848 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.821477 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.821542 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.821647 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.819800 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.822605 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.823203 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 20 
16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.823249 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.823295 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.839172 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.857486 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.868499 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.868834 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869193 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/620b0366-f02c-4d94-aceb-a0ee38453cdb-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869222 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh9gr\" (UniqueName: \"kubernetes.io/projected/d87453b6-da26-4486-ac4d-cf2798843007-kube-api-access-nh9gr\") pod \"dns-operator-744455d44c-7wptg\" (UID: \"d87453b6-da26-4486-ac4d-cf2798843007\") " pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869241 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869257 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2421e52-1edf-4403-bf98-3738f28fefa5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869275 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2585d1e6-a851-4ecc-8acd-8fd3d2426576-config\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869290 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-service-ca\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869306 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlg25\" (UniqueName: \"kubernetes.io/projected/35c557f8-2a15-4838-8042-16c5eb9ae8af-kube-api-access-wlg25\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869321 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869339 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdvvd\" (UniqueName: \"kubernetes.io/projected/00e95f8d-9e90-4c7c-8030-05f41539b15c-kube-api-access-mdvvd\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869374 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869391 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/620b0366-f02c-4d94-aceb-a0ee38453cdb-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869406 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd2t2\" (UniqueName: \"kubernetes.io/projected/10d3852f-ae68-471d-8501-a31f353ae0cd-kube-api-access-fd2t2\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869430 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv6tr\" (UniqueName: \"kubernetes.io/projected/ab7c9813-036f-4c85-82f8-389afa92ecbc-kube-api-access-rv6tr\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869450 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnw4z\" (UniqueName: \"kubernetes.io/projected/156a6fa0-bc96-492d-88d9-6c4873dff771-kube-api-access-lnw4z\") pod \"downloads-7954f5f757-92c94\" (UID: \"156a6fa0-bc96-492d-88d9-6c4873dff771\") " pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869466 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d07c6d80-c801-48e5-ae4c-89c80c203585-serving-cert\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869486 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-service-ca\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869501 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869518 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg6g8\" (UniqueName: \"kubernetes.io/projected/b88fed33-25aa-4de1-9a11-ddd5dc808758-kube-api-access-pg6g8\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869535 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-config\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869550 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k6gx\" (UniqueName: \"kubernetes.io/projected/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-kube-api-access-8k6gx\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869569 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-config\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869587 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869608 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869623 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab7c9813-036f-4c85-82f8-389afa92ecbc-trusted-ca\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869642 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2585d1e6-a851-4ecc-8acd-8fd3d2426576-images\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869657 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b50c17b-56ad-4488-851a-2ace4d4c1184-auth-proxy-config\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869672 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869685 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-policies\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869729 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869749 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jjvx\" (UniqueName: \"kubernetes.io/projected/b16b30f2-2578-46ab-b622-4f979193c91c-kube-api-access-5jjvx\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869769 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e95f8d-9e90-4c7c-8030-05f41539b15c-serving-cert\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869786 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869803 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-metrics-tls\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869829 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-service-ca-bundle\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869844 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d87453b6-da26-4486-ac4d-cf2798843007-metrics-tls\") pod \"dns-operator-744455d44c-7wptg\" (UID: \"d87453b6-da26-4486-ac4d-cf2798843007\") " pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869860 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-client\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869874 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/2585d1e6-a851-4ecc-8acd-8fd3d2426576-machine-api-operator-tls\") pod 
\"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869890 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-dir\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869913 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-serving-cert\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869927 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrnrv\" (UniqueName: \"kubernetes.io/projected/2cd4e02b-cb10-4bb2-b318-d24372346b1d-kube-api-access-xrnrv\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869941 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35c557f8-2a15-4838-8042-16c5eb9ae8af-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869955 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869969 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.869992 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/00e95f8d-9e90-4c7c-8030-05f41539b15c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870008 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg9qw\" (UniqueName: 
\"kubernetes.io/projected/0b50c17b-56ad-4488-851a-2ace4d4c1184-kube-api-access-qg9qw\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870027 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdp6v\" (UniqueName: \"kubernetes.io/projected/2585d1e6-a851-4ecc-8acd-8fd3d2426576-kube-api-access-zdp6v\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870043 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/620b0366-f02c-4d94-aceb-a0ee38453cdb-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870058 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b50c17b-56ad-4488-851a-2ace4d4c1184-config\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870090 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870104 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-trusted-ca\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870120 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b16b30f2-2578-46ab-b622-4f979193c91c-images\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870134 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b16b30f2-2578-46ab-b622-4f979193c91c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870159 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdqmg\" (UniqueName: 
\"kubernetes.io/projected/d07c6d80-c801-48e5-ae4c-89c80c203585-kube-api-access-bdqmg\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870179 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-ca\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870194 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c557f8-2a15-4838-8042-16c5eb9ae8af-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870209 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2421e52-1edf-4403-bf98-3738f28fefa5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870225 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwkg5\" (UniqueName: \"kubernetes.io/projected/e2421e52-1edf-4403-bf98-3738f28fefa5-kube-api-access-kwkg5\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870241 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-bound-sa-token\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870257 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-oauth-serving-cert\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870272 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870287 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b16b30f2-2578-46ab-b622-4f979193c91c-proxy-tls\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870303 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-oauth-config\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870317 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b629e2c3-ed8f-4f3e-af06-ec30249e2af7-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-jpgkb\" (UID: \"b629e2c3-ed8f-4f3e-af06-ec30249e2af7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870334 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-config\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870349 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-trusted-ca-bundle\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870363 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjcjh\" (UniqueName: \"kubernetes.io/projected/b629e2c3-ed8f-4f3e-af06-ec30249e2af7-kube-api-access-sjcjh\") pod \"cluster-samples-operator-665b6dd947-jpgkb\" (UID: \"b629e2c3-ed8f-4f3e-af06-ec30249e2af7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870378 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870394 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870414 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/b88fed33-25aa-4de1-9a11-ddd5dc808758-serving-cert\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870431 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870446 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0b50c17b-56ad-4488-851a-2ace4d4c1184-machine-approver-tls\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870463 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab7c9813-036f-4c85-82f8-389afa92ecbc-config\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870479 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab7c9813-036f-4c85-82f8-389afa92ecbc-serving-cert\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870499 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx6kd\" (UniqueName: \"kubernetes.io/projected/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-kube-api-access-jx6kd\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.870516 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tp775\" (UniqueName: \"kubernetes.io/projected/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-kube-api-access-tp775\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.871489 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5944b"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.872212 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/00e95f8d-9e90-4c7c-8030-05f41539b15c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.872784 4995 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.872877 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-ca\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.872901 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/620b0366-f02c-4d94-aceb-a0ee38453cdb-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.873241 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.873497 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vw77m"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.873563 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2585d1e6-a851-4ecc-8acd-8fd3d2426576-images\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.873663 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.873892 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.874577 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.874813 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.875426 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-service-ca-bundle\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.875621 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d07c6d80-c801-48e5-ae4c-89c80c203585-config\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.874858 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.875804 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.876151 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.876459 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.876536 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d07c6d80-c801-48e5-ae4c-89c80c203585-serving-cert\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.876561 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/620b0366-f02c-4d94-aceb-a0ee38453cdb-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.876891 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-service-ca\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.877090 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-62hj9"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.877283 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b88fed33-25aa-4de1-9a11-ddd5dc808758-config\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.877523 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2585d1e6-a851-4ecc-8acd-8fd3d2426576-config\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.878005 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.878435 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.878802 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879011 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879187 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879283 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879428 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879519 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00e95f8d-9e90-4c7c-8030-05f41539b15c-serving-cert\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.877136 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879698 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879807 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/b88fed33-25aa-4de1-9a11-ddd5dc808758-etcd-client\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879712 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.879895 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.880270 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.880587 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/2585d1e6-a851-4ecc-8acd-8fd3d2426576-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.880870 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.880974 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-prbmb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.881438 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.881740 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.882031 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.882481 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.883003 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.883758 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-9m9xl"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.884111 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.884827 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b88fed33-25aa-4de1-9a11-ddd5dc808758-serving-cert\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.885215 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.885242 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.887207 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.887240 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.887264 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.887786 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.888044 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.891149 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.892132 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.894045 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.894616 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.894682 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.895103 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rfjfc"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.895156 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.896300 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.897438 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-l4kss"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.904186 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.907845 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.911412 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-92c94"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.915180 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.917757 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9z4p9"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.919296 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.921670 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.922759 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.923908 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-55g56"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.925093 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6wxvh"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.927197 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.927666 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.928973 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t456c"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.929964 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jzs8c"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.930974 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-gdlqb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.932295 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-k24sk"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.932426 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.933164 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-8dc2q"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.933306 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.933976 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.934968 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.936980 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.938054 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-7wptg"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.940541 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.942084 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vw77m"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.943366 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-hpccj"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.945300 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5944b"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.945438 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.945765 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.946875 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.947161 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.947920 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.948991 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-62hj9"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.950628 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.951661 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-prbmb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.952689 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.953749 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.954757 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.955808 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-gdlqb"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.956907 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.959692 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.961219 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.963018 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.964673 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-hpccj"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.966237 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-zd9pn"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.967170 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.967683 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-zd9pn"] Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.967752 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971179 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvgcv\" (UniqueName: \"kubernetes.io/projected/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-kube-api-access-pvgcv\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971216 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k6gx\" (UniqueName: \"kubernetes.io/projected/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-kube-api-access-8k6gx\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971256 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5vw8\" (UniqueName: \"kubernetes.io/projected/5573e17e-4b7e-4afd-8608-e8afd1c98256-kube-api-access-j5vw8\") pod \"control-plane-machine-set-operator-78cbb6b69f-xxvtq\" (UID: \"5573e17e-4b7e-4afd-8608-e8afd1c98256\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971284 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8c61473-94e6-460c-a307-1b2f727a24ed-serving-cert\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971302 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-default-certificate\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971335 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5573e17e-4b7e-4afd-8608-e8afd1c98256-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-xxvtq\" (UID: \"5573e17e-4b7e-4afd-8608-e8afd1c98256\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971352 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971371 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkqcq\" (UniqueName: \"kubernetes.io/projected/a0d62048-3df5-4737-adc3-5544e1402f06-kube-api-access-vkqcq\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971407 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971426 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab7c9813-036f-4c85-82f8-389afa92ecbc-trusted-ca\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971446 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971465 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfxsv\" (UniqueName: \"kubernetes.io/projected/b01271cc-9d58-4948-9a83-564b481d8eff-kube-api-access-zfxsv\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971502 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971521 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-policies\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971538 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971573 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-stats-auth\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971593 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b50c17b-56ad-4488-851a-2ace4d4c1184-auth-proxy-config\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jjvx\" (UniqueName: \"kubernetes.io/projected/b16b30f2-2578-46ab-b622-4f979193c91c-kube-api-access-5jjvx\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971658 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7jhw\" (UniqueName: \"kubernetes.io/projected/010bede8-799b-47ad-88ae-3da08414f00e-kube-api-access-s7jhw\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971679 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971696 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-metrics-tls\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971731 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971749 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-metrics-certs\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:43 crc 
kubenswrapper[4995]: I0120 16:33:43.971779 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d87453b6-da26-4486-ac4d-cf2798843007-metrics-tls\") pod \"dns-operator-744455d44c-7wptg\" (UID: \"d87453b6-da26-4486-ac4d-cf2798843007\") " pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971812 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-dir\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971829 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrnrv\" (UniqueName: \"kubernetes.io/projected/2cd4e02b-cb10-4bb2-b318-d24372346b1d-kube-api-access-xrnrv\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971846 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35c557f8-2a15-4838-8042-16c5eb9ae8af-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971863 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971898 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971927 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-serving-cert\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971963 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b897ebd4-5389-4a32-84d2-27f584c7faab-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.971983 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9czh\" (UniqueName: \"kubernetes.io/projected/c8c61473-94e6-460c-a307-1b2f727a24ed-kube-api-access-g9czh\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972011 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg9qw\" (UniqueName: \"kubernetes.io/projected/0b50c17b-56ad-4488-851a-2ace4d4c1184-kube-api-access-qg9qw\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972045 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/b01271cc-9d58-4948-9a83-564b481d8eff-tmpfs\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972064 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972340 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0d62048-3df5-4737-adc3-5544e1402f06-serving-cert\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972457 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab7c9813-036f-4c85-82f8-389afa92ecbc-trusted-ca\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972502 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b16b30f2-2578-46ab-b622-4f979193c91c-images\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.973148 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b16b30f2-2578-46ab-b622-4f979193c91c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972880 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/0b50c17b-56ad-4488-851a-2ace4d4c1184-auth-proxy-config\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.972540 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-dir\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.973791 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b16b30f2-2578-46ab-b622-4f979193c91c-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.973522 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-policies\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.973566 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.973517 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b16b30f2-2578-46ab-b622-4f979193c91c-images\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974412 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b50c17b-56ad-4488-851a-2ace4d4c1184-config\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974511 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974512 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974643 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-trusted-ca\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974800 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-config\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974920 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b01271cc-9d58-4948-9a83-564b481d8eff-apiservice-cert\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974940 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b50c17b-56ad-4488-851a-2ace4d4c1184-config\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.974968 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d649f74-8f55-43de-b6d5-5203c18f126b-srv-cert\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975031 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2421e52-1edf-4403-bf98-3738f28fefa5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975069 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwkg5\" (UniqueName: \"kubernetes.io/projected/e2421e52-1edf-4403-bf98-3738f28fefa5-kube-api-access-kwkg5\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975119 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b01271cc-9d58-4948-9a83-564b481d8eff-webhook-cert\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975144 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975179 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c557f8-2a15-4838-8042-16c5eb9ae8af-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975206 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlm98\" (UniqueName: \"kubernetes.io/projected/43165ab8-b1a3-4885-88f6-bc83ef03f454-kube-api-access-rlm98\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975232 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-bound-sa-token\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975259 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975286 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b16b30f2-2578-46ab-b622-4f979193c91c-proxy-tls\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975312 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-oauth-serving-cert\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975343 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzzgt\" (UniqueName: \"kubernetes.io/projected/5d649f74-8f55-43de-b6d5-5203c18f126b-kube-api-access-nzzgt\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975378 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-oauth-config\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975412 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b629e2c3-ed8f-4f3e-af06-ec30249e2af7-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-jpgkb\" (UID: \"b629e2c3-ed8f-4f3e-af06-ec30249e2af7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975451 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6ndf\" (UniqueName: \"kubernetes.io/projected/b897ebd4-5389-4a32-84d2-27f584c7faab-kube-api-access-b6ndf\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975491 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-config\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975524 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-trusted-ca-bundle\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975562 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjcjh\" (UniqueName: \"kubernetes.io/projected/b629e2c3-ed8f-4f3e-af06-ec30249e2af7-kube-api-access-sjcjh\") pod \"cluster-samples-operator-665b6dd947-jpgkb\" (UID: \"b629e2c3-ed8f-4f3e-af06-ec30249e2af7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975596 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975637 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975684 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: 
\"kubernetes.io/secret/0b50c17b-56ad-4488-851a-2ace4d4c1184-machine-approver-tls\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975715 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab7c9813-036f-4c85-82f8-389afa92ecbc-config\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975746 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab7c9813-036f-4c85-82f8-389afa92ecbc-serving-cert\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975786 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-service-ca-bundle\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975817 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-client-ca\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.975830 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35c557f8-2a15-4838-8042-16c5eb9ae8af-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.976055 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.976399 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-oauth-serving-cert\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.976565 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-serving-cert\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " 
pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.976746 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-trusted-ca\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977305 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-trusted-ca-bundle\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977408 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d649f74-8f55-43de-b6d5-5203c18f126b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977431 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-config\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977623 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tp775\" (UniqueName: \"kubernetes.io/projected/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-kube-api-access-tp775\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977674 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-srv-cert\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977694 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-config\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977720 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977728 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"secret-volume\" (UniqueName: \"kubernetes.io/secret/43165ab8-b1a3-4885-88f6-bc83ef03f454-secret-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977822 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-config\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977857 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977871 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab7c9813-036f-4c85-82f8-389afa92ecbc-config\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977903 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2421e52-1edf-4403-bf98-3738f28fefa5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.977951 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/010bede8-799b-47ad-88ae-3da08414f00e-config\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978090 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh9gr\" (UniqueName: \"kubernetes.io/projected/d87453b6-da26-4486-ac4d-cf2798843007-kube-api-access-nh9gr\") pod \"dns-operator-744455d44c-7wptg\" (UID: \"d87453b6-da26-4486-ac4d-cf2798843007\") " pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978133 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978160 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978217 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-service-ca\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978237 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlg25\" (UniqueName: \"kubernetes.io/projected/35c557f8-2a15-4838-8042-16c5eb9ae8af-kube-api-access-wlg25\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978289 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/010bede8-799b-47ad-88ae-3da08414f00e-serving-cert\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978653 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/35c557f8-2a15-4838-8042-16c5eb9ae8af-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978753 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978760 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2421e52-1edf-4403-bf98-3738f28fefa5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978911 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv6tr\" (UniqueName: \"kubernetes.io/projected/ab7c9813-036f-4c85-82f8-389afa92ecbc-kube-api-access-rv6tr\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978939 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-profile-collector-cert\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978977 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgbwr\" (UniqueName: \"kubernetes.io/projected/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-kube-api-access-kgbwr\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.978981 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.979071 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd2t2\" (UniqueName: \"kubernetes.io/projected/10d3852f-ae68-471d-8501-a31f353ae0cd-kube-api-access-fd2t2\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.979626 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.979950 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-service-ca\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980131 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980133 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-oauth-config\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980309 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b16b30f2-2578-46ab-b622-4f979193c91c-proxy-tls\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") 
" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980319 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ab7c9813-036f-4c85-82f8-389afa92ecbc-serving-cert\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980237 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980699 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980808 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b629e2c3-ed8f-4f3e-af06-ec30249e2af7-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-jpgkb\" (UID: \"b629e2c3-ed8f-4f3e-af06-ec30249e2af7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.980980 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.981163 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.981664 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.981762 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0b50c17b-56ad-4488-851a-2ace4d4c1184-machine-approver-tls\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.982231 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-metrics-tls\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.983449 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2421e52-1edf-4403-bf98-3738f28fefa5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:43 crc kubenswrapper[4995]: I0120 16:33:43.984887 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d87453b6-da26-4486-ac4d-cf2798843007-metrics-tls\") pod \"dns-operator-744455d44c-7wptg\" (UID: \"d87453b6-da26-4486-ac4d-cf2798843007\") " pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.020833 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdqmg\" (UniqueName: \"kubernetes.io/projected/d07c6d80-c801-48e5-ae4c-89c80c203585-kube-api-access-bdqmg\") pod \"authentication-operator-69f744f599-rfjfc\" (UID: \"d07c6d80-c801-48e5-ae4c-89c80c203585\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.040948 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdp6v\" (UniqueName: \"kubernetes.io/projected/2585d1e6-a851-4ecc-8acd-8fd3d2426576-kube-api-access-zdp6v\") pod \"machine-api-operator-5694c8668f-l4kss\" (UID: \"2585d1e6-a851-4ecc-8acd-8fd3d2426576\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.048249 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.050112 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080185 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080447 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080502 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-metrics-certs\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080567 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b897ebd4-5389-4a32-84d2-27f584c7faab-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080633 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9czh\" (UniqueName: \"kubernetes.io/projected/c8c61473-94e6-460c-a307-1b2f727a24ed-kube-api-access-g9czh\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080674 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/b01271cc-9d58-4948-9a83-564b481d8eff-tmpfs\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080705 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080734 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0d62048-3df5-4737-adc3-5544e1402f06-serving-cert\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080833 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-config\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 
16:33:44.080888 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b01271cc-9d58-4948-9a83-564b481d8eff-apiservice-cert\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.080939 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d649f74-8f55-43de-b6d5-5203c18f126b-srv-cert\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081006 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b01271cc-9d58-4948-9a83-564b481d8eff-webhook-cert\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081038 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/b01271cc-9d58-4948-9a83-564b481d8eff-tmpfs\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081048 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081116 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlm98\" (UniqueName: \"kubernetes.io/projected/43165ab8-b1a3-4885-88f6-bc83ef03f454-kube-api-access-rlm98\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081166 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6ndf\" (UniqueName: \"kubernetes.io/projected/b897ebd4-5389-4a32-84d2-27f584c7faab-kube-api-access-b6ndf\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081196 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzzgt\" (UniqueName: \"kubernetes.io/projected/5d649f74-8f55-43de-b6d5-5203c18f126b-kube-api-access-nzzgt\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081261 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-service-ca-bundle\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081291 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-client-ca\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081328 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d649f74-8f55-43de-b6d5-5203c18f126b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081380 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-srv-cert\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081417 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-config\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081448 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/43165ab8-b1a3-4885-88f6-bc83ef03f454-secret-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081479 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-config\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081536 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/010bede8-799b-47ad-88ae-3da08414f00e-config\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081616 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/010bede8-799b-47ad-88ae-3da08414f00e-serving-cert\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081679 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-profile-collector-cert\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081727 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgbwr\" (UniqueName: \"kubernetes.io/projected/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-kube-api-access-kgbwr\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081770 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvgcv\" (UniqueName: \"kubernetes.io/projected/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-kube-api-access-pvgcv\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081833 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5vw8\" (UniqueName: \"kubernetes.io/projected/5573e17e-4b7e-4afd-8608-e8afd1c98256-kube-api-access-j5vw8\") pod \"control-plane-machine-set-operator-78cbb6b69f-xxvtq\" (UID: \"5573e17e-4b7e-4afd-8608-e8afd1c98256\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081870 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-default-certificate\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081900 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8c61473-94e6-460c-a307-1b2f727a24ed-serving-cert\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081934 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5573e17e-4b7e-4afd-8608-e8afd1c98256-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-xxvtq\" (UID: \"5573e17e-4b7e-4afd-8608-e8afd1c98256\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081964 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.081996 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkqcq\" (UniqueName: \"kubernetes.io/projected/a0d62048-3df5-4737-adc3-5544e1402f06-kube-api-access-vkqcq\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.082031 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.082061 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfxsv\" (UniqueName: \"kubernetes.io/projected/b01271cc-9d58-4948-9a83-564b481d8eff-kube-api-access-zfxsv\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.082139 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-stats-auth\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.082201 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7jhw\" (UniqueName: \"kubernetes.io/projected/010bede8-799b-47ad-88ae-3da08414f00e-kube-api-access-s7jhw\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.085673 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx6kd\" (UniqueName: \"kubernetes.io/projected/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-kube-api-access-jx6kd\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.086882 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5573e17e-4b7e-4afd-8608-e8afd1c98256-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-xxvtq\" (UID: \"5573e17e-4b7e-4afd-8608-e8afd1c98256\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.088309 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.121002 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnw4z\" (UniqueName: 
\"kubernetes.io/projected/156a6fa0-bc96-492d-88d9-6c4873dff771-kube-api-access-lnw4z\") pod \"downloads-7954f5f757-92c94\" (UID: \"156a6fa0-bc96-492d-88d9-6c4873dff771\") " pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.142430 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ecd1ca64-de1b-44dd-87ba-20ed3321d99a-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-j5jck\" (UID: \"ecd1ca64-de1b-44dd-87ba-20ed3321d99a\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.165989 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/620b0366-f02c-4d94-aceb-a0ee38453cdb-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-tbsrs\" (UID: \"620b0366-f02c-4d94-aceb-a0ee38453cdb\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.198024 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdvvd\" (UniqueName: \"kubernetes.io/projected/00e95f8d-9e90-4c7c-8030-05f41539b15c-kube-api-access-mdvvd\") pod \"openshift-config-operator-7777fb866f-5f7mb\" (UID: \"00e95f8d-9e90-4c7c-8030-05f41539b15c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.207468 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.208462 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg6g8\" (UniqueName: \"kubernetes.io/projected/b88fed33-25aa-4de1-9a11-ddd5dc808758-kube-api-access-pg6g8\") pod \"etcd-operator-b45778765-9z4p9\" (UID: \"b88fed33-25aa-4de1-9a11-ddd5dc808758\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.228549 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.247974 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.251289 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-rfjfc"] Jan 20 16:33:44 crc kubenswrapper[4995]: W0120 16:33:44.261333 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd07c6d80_c801_48e5_ae4c_89c80c203585.slice/crio-2488de9313e8b4f34a2703ffe2ce52635a915d92386f9a3226040213f3df2f2f WatchSource:0}: Error finding container 2488de9313e8b4f34a2703ffe2ce52635a915d92386f9a3226040213f3df2f2f: Status 404 returned error can't find the container with id 2488de9313e8b4f34a2703ffe2ce52635a915d92386f9a3226040213f3df2f2f Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.267473 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.272630 4995 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-l4kss"] Jan 20 16:33:44 crc kubenswrapper[4995]: W0120 16:33:44.278917 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2585d1e6_a851_4ecc_8acd_8fd3d2426576.slice/crio-074bb9d95ef6b7727dc666e6ae248ede0a8b3e5a39273fad5239d99b83d9994f WatchSource:0}: Error finding container 074bb9d95ef6b7727dc666e6ae248ede0a8b3e5a39273fad5239d99b83d9994f: Status 404 returned error can't find the container with id 074bb9d95ef6b7727dc666e6ae248ede0a8b3e5a39273fad5239d99b83d9994f Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.289142 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.295716 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.307855 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.315017 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.316477 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b01271cc-9d58-4948-9a83-564b481d8eff-webhook-cert\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.317259 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b01271cc-9d58-4948-9a83-564b481d8eff-apiservice-cert\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.327793 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.329754 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.336289 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.348583 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.369137 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.388244 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.399038 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.408022 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.429017 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck"] Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.429059 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.448936 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.468872 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.488598 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.507903 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.528027 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.548980 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.568263 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.588141 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.608650 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.627966 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 20 16:33:44 crc 
kubenswrapper[4995]: I0120 16:33:44.635225 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.648394 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.651204 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" event={"ID":"d07c6d80-c801-48e5-ae4c-89c80c203585","Type":"ContainerStarted","Data":"2488de9313e8b4f34a2703ffe2ce52635a915d92386f9a3226040213f3df2f2f"} Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.652355 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" event={"ID":"2585d1e6-a851-4ecc-8acd-8fd3d2426576","Type":"ContainerStarted","Data":"074bb9d95ef6b7727dc666e6ae248ede0a8b3e5a39273fad5239d99b83d9994f"} Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.669017 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.689005 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.711858 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-config\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.712304 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.727681 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.748956 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.755857 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-srv-cert\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.768969 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.788713 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 
16:33:44.795618 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/5d649f74-8f55-43de-b6d5-5203c18f126b-profile-collector-cert\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.796723 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/43165ab8-b1a3-4885-88f6-bc83ef03f454-secret-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.796783 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-profile-collector-cert\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.808259 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.829645 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.848583 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.853193 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-config\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.868245 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.886560 4995 request.go:700] Waited for 1.004863533s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-controller-manager/secrets?fieldSelector=metadata.name%3Dserving-cert&limit=500&resourceVersion=0 Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.888951 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.896923 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8c61473-94e6-460c-a307-1b2f727a24ed-serving-cert\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.907518 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.913264 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-client-ca\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.935137 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.944335 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.947646 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.968567 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.988347 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 20 16:33:44 crc kubenswrapper[4995]: I0120 16:33:44.995995 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/5d649f74-8f55-43de-b6d5-5203c18f126b-srv-cert\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.008198 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.028740 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.035588 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0d62048-3df5-4737-adc3-5544e1402f06-serving-cert\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.048364 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.067366 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.081531 4995 configmap.go:193] Couldn't get configMap openshift-ingress/service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.081680 4995 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc 
kubenswrapper[4995]: E0120 16:33:45.081577 4995 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.081609 4995 secret.go:188] Couldn't get secret openshift-ingress/router-metrics-certs-default: failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.081664 4995 configmap.go:193] Couldn't get configMap openshift-route-controller-manager/client-ca: failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.081715 4995 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.081986 4995 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.082037 4995 secret.go:188] Couldn't get secret openshift-ingress/router-certs-default: failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.082501 4995 secret.go:188] Couldn't get secret openshift-ingress/router-stats-default: failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.082673 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-service-ca-bundle podName:9ccd2a3c-8848-42cb-96fa-a9df5a60a729 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.581770448 +0000 UTC m=+143.826375294 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca-bundle" (UniqueName: "kubernetes.io/configmap/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-service-ca-bundle") pod "router-default-5444994796-9m9xl" (UID: "9ccd2a3c-8848-42cb-96fa-a9df5a60a729") : failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.082864 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume podName:43165ab8-b1a3-4885-88f6-bc83ef03f454 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.582842759 +0000 UTC m=+143.827447605 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume") pod "collect-profiles-29482110-4xrhs" (UID: "43165ab8-b1a3-4885-88f6-bc83ef03f454") : failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.083045 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b897ebd4-5389-4a32-84d2-27f584c7faab-package-server-manager-serving-cert podName:b897ebd4-5389-4a32-84d2-27f584c7faab nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.583022284 +0000 UTC m=+143.827627130 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/b897ebd4-5389-4a32-84d2-27f584c7faab-package-server-manager-serving-cert") pod "package-server-manager-789f6589d5-wxtx6" (UID: "b897ebd4-5389-4a32-84d2-27f584c7faab") : failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.083299 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-metrics-certs podName:9ccd2a3c-8848-42cb-96fa-a9df5a60a729 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.583278691 +0000 UTC m=+143.827883537 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-metrics-certs") pod "router-default-5444994796-9m9xl" (UID: "9ccd2a3c-8848-42cb-96fa-a9df5a60a729") : failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.083468 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca podName:a0d62048-3df5-4737-adc3-5544e1402f06 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.583453086 +0000 UTC m=+143.828057932 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "client-ca" (UniqueName: "kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca") pod "route-controller-manager-6576b87f9c-67sf4" (UID: "a0d62048-3df5-4737-adc3-5544e1402f06") : failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.083625 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/010bede8-799b-47ad-88ae-3da08414f00e-config podName:010bede8-799b-47ad-88ae-3da08414f00e nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.5836097 +0000 UTC m=+143.828214546 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/010bede8-799b-47ad-88ae-3da08414f00e-config") pod "service-ca-operator-777779d784-9jh8c" (UID: "010bede8-799b-47ad-88ae-3da08414f00e") : failed to sync configmap cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.083774 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/010bede8-799b-47ad-88ae-3da08414f00e-serving-cert podName:010bede8-799b-47ad-88ae-3da08414f00e nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.583760255 +0000 UTC m=+143.828365111 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/010bede8-799b-47ad-88ae-3da08414f00e-serving-cert") pod "service-ca-operator-777779d784-9jh8c" (UID: "010bede8-799b-47ad-88ae-3da08414f00e") : failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.083937 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-default-certificate podName:9ccd2a3c-8848-42cb-96fa-a9df5a60a729 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.583921739 +0000 UTC m=+143.828526585 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "default-certificate" (UniqueName: "kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-default-certificate") pod "router-default-5444994796-9m9xl" (UID: "9ccd2a3c-8848-42cb-96fa-a9df5a60a729") : failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: E0120 16:33:45.084139 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-stats-auth podName:9ccd2a3c-8848-42cb-96fa-a9df5a60a729 nodeName:}" failed. No retries permitted until 2026-01-20 16:33:45.584112895 +0000 UTC m=+143.828717741 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "stats-auth" (UniqueName: "kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-stats-auth") pod "router-default-5444994796-9m9xl" (UID: "9ccd2a3c-8848-42cb-96fa-a9df5a60a729") : failed to sync secret cache: timed out waiting for the condition Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.087595 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.108888 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.128223 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.148804 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.167975 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.188805 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.208190 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.228899 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.248808 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.269703 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.289605 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.307023 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-config\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.309933 4995 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-oauth-apiserver"/"etcd-client" Jan 20 16:33:45 crc kubenswrapper[4995]: W0120 16:33:45.316039 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podecd1ca64_de1b_44dd_87ba_20ed3321d99a.slice/crio-6210274fc2104cbaab489b39f56e259d24956806ac8f985b8e0dced0252adf14 WatchSource:0}: Error finding container 6210274fc2104cbaab489b39f56e259d24956806ac8f985b8e0dced0252adf14: Status 404 returned error can't find the container with id 6210274fc2104cbaab489b39f56e259d24956806ac8f985b8e0dced0252adf14 Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.330349 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.348236 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.368303 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.388103 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.409360 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.427883 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.448481 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.468148 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.487887 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.508539 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.555934 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.556190 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.557145 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb"] Jan 20 16:33:45 crc kubenswrapper[4995]: W0120 16:33:45.564661 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00e95f8d_9e90_4c7c_8030_05f41539b15c.slice/crio-c98fddb7817694df8688642186da5d211ac7f4b106f58dcc71a7dd0f3639d41c WatchSource:0}: Error finding container c98fddb7817694df8688642186da5d211ac7f4b106f58dcc71a7dd0f3639d41c: Status 404 returned error can't find the container with id c98fddb7817694df8688642186da5d211ac7f4b106f58dcc71a7dd0f3639d41c Jan 20 16:33:45 
crc kubenswrapper[4995]: I0120 16:33:45.567852 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.570767 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs"] Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.588291 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.605721 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/010bede8-799b-47ad-88ae-3da08414f00e-config\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606042 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/010bede8-799b-47ad-88ae-3da08414f00e-serving-cert\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606176 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-default-certificate\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606235 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-stats-auth\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606275 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-metrics-certs\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606329 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b897ebd4-5389-4a32-84d2-27f584c7faab-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606376 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:45 crc 
kubenswrapper[4995]: I0120 16:33:45.606427 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606441 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/010bede8-799b-47ad-88ae-3da08414f00e-config\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.606521 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-service-ca-bundle\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.607136 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-service-ca-bundle\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.607168 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-92c94"] Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.607672 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.608353 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.608995 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9z4p9"] Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.611881 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-metrics-certs\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.611949 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/010bede8-799b-47ad-88ae-3da08414f00e-serving-cert\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.612562 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b897ebd4-5389-4a32-84d2-27f584c7faab-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.612610 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-stats-auth\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.612955 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-default-certificate\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:45 crc kubenswrapper[4995]: W0120 16:33:45.625988 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb88fed33_25aa_4de1_9a11_ddd5dc808758.slice/crio-26c61c43632d7999c870370383c8d85c0503f50612905433114a65fea30e9abd WatchSource:0}: Error finding container 26c61c43632d7999c870370383c8d85c0503f50612905433114a65fea30e9abd: Status 404 returned error can't find the container with id 26c61c43632d7999c870370383c8d85c0503f50612905433114a65fea30e9abd Jan 20 16:33:45 crc kubenswrapper[4995]: W0120 16:33:45.627045 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod156a6fa0_bc96_492d_88d9_6c4873dff771.slice/crio-f1717310f40332936eb8c25d50ea9b14881219b24bd2d17a81ff263d3a0435f3 WatchSource:0}: Error finding container f1717310f40332936eb8c25d50ea9b14881219b24bd2d17a81ff263d3a0435f3: Status 404 returned error can't find the container with id f1717310f40332936eb8c25d50ea9b14881219b24bd2d17a81ff263d3a0435f3 Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.627911 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.648229 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.659046 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" event={"ID":"620b0366-f02c-4d94-aceb-a0ee38453cdb","Type":"ContainerStarted","Data":"a3c0ac152d9c72b7fd9e81e84c400b79230c294f8d2a812abfbc7814af3ef1bb"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.660043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" event={"ID":"b88fed33-25aa-4de1-9a11-ddd5dc808758","Type":"ContainerStarted","Data":"26c61c43632d7999c870370383c8d85c0503f50612905433114a65fea30e9abd"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.662231 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" event={"ID":"d07c6d80-c801-48e5-ae4c-89c80c203585","Type":"ContainerStarted","Data":"e27795540601fe15e22f62f52e0608d89a89183a3bf8a059f330aa1015b923d5"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.667688 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.669366 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" event={"ID":"2585d1e6-a851-4ecc-8acd-8fd3d2426576","Type":"ContainerStarted","Data":"7065a3fb0c158ab222d1450efacabbb44fd427e7013ee5cbce38d4349cb2e55a"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.669460 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" event={"ID":"2585d1e6-a851-4ecc-8acd-8fd3d2426576","Type":"ContainerStarted","Data":"fa7c45cf7c961cb464d1b0866bc385d116a059cd9ea0d3a43236220a40ccb0e3"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.671262 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" event={"ID":"00e95f8d-9e90-4c7c-8030-05f41539b15c","Type":"ContainerStarted","Data":"c98fddb7817694df8688642186da5d211ac7f4b106f58dcc71a7dd0f3639d41c"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.677539 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-92c94" event={"ID":"156a6fa0-bc96-492d-88d9-6c4873dff771","Type":"ContainerStarted","Data":"f1717310f40332936eb8c25d50ea9b14881219b24bd2d17a81ff263d3a0435f3"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.680368 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" event={"ID":"ecd1ca64-de1b-44dd-87ba-20ed3321d99a","Type":"ContainerStarted","Data":"ab403e6799ecd96c46c769f107f26bbd76326a893e3d9a6877b7f3584321766e"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.680698 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" event={"ID":"ecd1ca64-de1b-44dd-87ba-20ed3321d99a","Type":"ContainerStarted","Data":"6210274fc2104cbaab489b39f56e259d24956806ac8f985b8e0dced0252adf14"} Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.687696 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.713524 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.728210 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.748346 4995 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.767620 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.788043 4995 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.807860 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.828389 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.848874 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.867934 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.886810 4995 request.go:700] Waited for 1.91542845s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/serviceaccounts/ingress-operator/token Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.905687 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k6gx\" (UniqueName: \"kubernetes.io/projected/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-kube-api-access-8k6gx\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.925635 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrnrv\" (UniqueName: \"kubernetes.io/projected/2cd4e02b-cb10-4bb2-b318-d24372346b1d-kube-api-access-xrnrv\") pod \"console-f9d7485db-jzs8c\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.941810 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jjvx\" (UniqueName: \"kubernetes.io/projected/b16b30f2-2578-46ab-b622-4f979193c91c-kube-api-access-5jjvx\") pod \"machine-config-operator-74547568cd-b4vgm\" (UID: \"b16b30f2-2578-46ab-b622-4f979193c91c\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.965225 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg9qw\" (UniqueName: \"kubernetes.io/projected/0b50c17b-56ad-4488-851a-2ace4d4c1184-kube-api-access-qg9qw\") pod \"machine-approver-56656f9798-f5x87\" (UID: \"0b50c17b-56ad-4488-851a-2ace4d4c1184\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.981311 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwkg5\" (UniqueName: \"kubernetes.io/projected/e2421e52-1edf-4403-bf98-3738f28fefa5-kube-api-access-kwkg5\") pod \"openshift-apiserver-operator-796bbdcf4f-zhdmz\" (UID: \"e2421e52-1edf-4403-bf98-3738f28fefa5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.996488 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 16:33:45 crc kubenswrapper[4995]: I0120 16:33:45.996488 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.008961 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9ed517c9-f353-4a74-9d38-7f5fb3c166ca-bound-sa-token\") pod \"ingress-operator-5b745b69d9-h4lnx\" (UID: \"9ed517c9-f353-4a74-9d38-7f5fb3c166ca\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.009204 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-jzs8c"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.016389 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.024762 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjcjh\" (UniqueName: \"kubernetes.io/projected/b629e2c3-ed8f-4f3e-af06-ec30249e2af7-kube-api-access-sjcjh\") pod \"cluster-samples-operator-665b6dd947-jpgkb\" (UID: \"b629e2c3-ed8f-4f3e-af06-ec30249e2af7\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.048494 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tp775\" (UniqueName: \"kubernetes.io/projected/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-kube-api-access-tp775\") pod \"oauth-openshift-558db77b4-8dc2q\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.070233 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlg25\" (UniqueName: \"kubernetes.io/projected/35c557f8-2a15-4838-8042-16c5eb9ae8af-kube-api-access-wlg25\") pod \"openshift-controller-manager-operator-756b6f6bc6-rvmnn\" (UID: \"35c557f8-2a15-4838-8042-16c5eb9ae8af\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.095529 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh9gr\" (UniqueName: \"kubernetes.io/projected/d87453b6-da26-4486-ac4d-cf2798843007-kube-api-access-nh9gr\") pod \"dns-operator-744455d44c-7wptg\" (UID: \"d87453b6-da26-4486-ac4d-cf2798843007\") " pod="openshift-dns-operator/dns-operator-744455d44c-7wptg"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.110858 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv6tr\" (UniqueName: \"kubernetes.io/projected/ab7c9813-036f-4c85-82f8-389afa92ecbc-kube-api-access-rv6tr\") pod \"console-operator-58897d9998-55g56\" (UID: \"ab7c9813-036f-4c85-82f8-389afa92ecbc\") " pod="openshift-console-operator/console-operator-58897d9998-55g56"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.124281 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd2t2\" (UniqueName: \"kubernetes.io/projected/10d3852f-ae68-471d-8501-a31f353ae0cd-kube-api-access-fd2t2\") pod \"marketplace-operator-79b997595-t456c\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " pod="openshift-marketplace/marketplace-operator-79b997595-t456c"
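
Each util.go:30 "No sandbox for pod can be found. Need to start a new one" entry marks the point where the kubelet's runtime manager decides a pod needs a fresh pod sandbox: the pod exists in the API, but CRI-O knows no sandbox for it yet, which is expected for every pod on the first sync after a kubelet restart. The decision reduces to something like this toy model (the real logic is kuberuntime's computePodActions; the names below are illustrative only):

    package main

    import "fmt"

    // podActions is a stand-in for the kubelet's sandbox decision.
    type podActions struct {
        CreateSandbox bool
    }

    func computeActions(knownSandboxes int) podActions {
        if knownSandboxes == 0 {
            // logged as: "No sandbox for pod can be found. Need to start a new one"
            return podActions{CreateSandbox: true}
        }
        return podActions{}
    }

    func main() {
        fmt.Printf("%+v\n", computeActions(0)) // first sync after restart
    }

On a live node, crictl pods --name <pod-name> shows whether CRI-O has since created the sandbox in question.
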
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.165593 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9czh\" (UniqueName: \"kubernetes.io/projected/c8c61473-94e6-460c-a307-1b2f727a24ed-kube-api-access-g9czh\") pod \"controller-manager-879f6c89f-prbmb\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") " pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.181820 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlm98\" (UniqueName: \"kubernetes.io/projected/43165ab8-b1a3-4885-88f6-bc83ef03f454-kube-api-access-rlm98\") pod \"collect-profiles-29482110-4xrhs\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.194645 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm"]
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.200626 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzzgt\" (UniqueName: \"kubernetes.io/projected/5d649f74-8f55-43de-b6d5-5203c18f126b-kube-api-access-nzzgt\") pod \"olm-operator-6b444d44fb-6k4z7\" (UID: \"5d649f74-8f55-43de-b6d5-5203c18f126b\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.205442 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb"
Jan 20 16:33:46 crc kubenswrapper[4995]: W0120 16:33:46.205743 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb16b30f2_2578_46ab_b622_4f979193c91c.slice/crio-e3d6d7b2dc140ddfbb7cc2cb9e0dede749f11b6c00c224436b7382a739059937 WatchSource:0}: Error finding container e3d6d7b2dc140ddfbb7cc2cb9e0dede749f11b6c00c224436b7382a739059937: Status 404 returned error can't find the container with id e3d6d7b2dc140ddfbb7cc2cb9e0dede749f11b6c00c224436b7382a739059937
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.217589 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.221824 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.232117 4995 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.235546 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6ndf\" (UniqueName: \"kubernetes.io/projected/b897ebd4-5389-4a32-84d2-27f584c7faab-kube-api-access-b6ndf\") pod \"package-server-manager-789f6589d5-wxtx6\" (UID: \"b897ebd4-5389-4a32-84d2-27f584c7faab\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.242457 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-jzs8c"] Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.247254 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgbwr\" (UniqueName: \"kubernetes.io/projected/ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2-kube-api-access-kgbwr\") pod \"catalog-operator-68c6474976-5dkkz\" (UID: \"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.256431 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.256601 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.265890 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz"] Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.272012 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5vw8\" (UniqueName: \"kubernetes.io/projected/5573e17e-4b7e-4afd-8608-e8afd1c98256-kube-api-access-j5vw8\") pod \"control-plane-machine-set-operator-78cbb6b69f-xxvtq\" (UID: \"5573e17e-4b7e-4afd-8608-e8afd1c98256\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.290265 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.291703 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvgcv\" (UniqueName: \"kubernetes.io/projected/9ccd2a3c-8848-42cb-96fa-a9df5a60a729-kube-api-access-pvgcv\") pod \"router-default-5444994796-9m9xl\" (UID: \"9ccd2a3c-8848-42cb-96fa-a9df5a60a729\") " pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.302864 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b77c9d0-1343-4fd0-992a-bd2a4517bf0a-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-bcs2r\" (UID: \"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.323497 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.327784 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkqcq\" (UniqueName: \"kubernetes.io/projected/a0d62048-3df5-4737-adc3-5544e1402f06-kube-api-access-vkqcq\") pod \"route-controller-manager-6576b87f9c-67sf4\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.330345 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.338430 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.343392 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.344546 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfxsv\" (UniqueName: \"kubernetes.io/projected/b01271cc-9d58-4948-9a83-564b481d8eff-kube-api-access-zfxsv\") pod \"packageserver-d55dfcdfc-t8rpq\" (UID: \"b01271cc-9d58-4948-9a83-564b481d8eff\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.349431 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.360887 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.365423 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7jhw\" (UniqueName: \"kubernetes.io/projected/010bede8-799b-47ad-88ae-3da08414f00e-kube-api-access-s7jhw\") pod \"service-ca-operator-777779d784-9jh8c\" (UID: \"010bede8-799b-47ad-88ae-3da08414f00e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.419840 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.427227 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-encryption-config\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.427260 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-tls\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.427381 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvnfz\" (UniqueName: \"kubernetes.io/projected/7bd41743-62a2-4cec-84c0-68fef9f476e4-kube-api-access-jvnfz\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.427459 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.427499 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39d5b08d-6337-4abb-9e94-191b0c6ca2db-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429262 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-serving-cert\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429304 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/231817fb-8c3c-4a4f-ad19-f019974187e7-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429663 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kszjk\" (UniqueName: \"kubernetes.io/projected/39d5b08d-6337-4abb-9e94-191b0c6ca2db-kube-api-access-kszjk\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " 
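
From this point the volume manager's reconcile loop dominates the log. Every volume walks the same three markers that interleave below: reconciler_common.go:245 "VerifyControllerAttachedVolume started" (the volume is accepted into the actual state of world as attached), reconciler_common.go:218 "MountVolume started", and operation_generator.go:637 "MountVolume.SetUp succeeded" (reconciler_common.go:159 is the corresponding unmount path). A volume that never reaches the third marker is the one worth chasing; a per-volume tracker can be sketched as follows (illustrative triage code, not part of the kubelet; "kubelet.log" is a hypothetical local copy):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // Records the most recent phase marker seen for each UniqueName.
    func main() {
        phase := map[string]string{}
        f, err := os.Open("kubelet.log") // hypothetical local copy
        if err != nil {
            panic(err)
        }
        defer f.Close()
        sc := bufio.NewScanner(f)
        sc.Buffer(make([]byte, 1<<20), 1<<20)
        for sc.Scan() {
            line := sc.Text()
            name := extract(line, `UniqueName: \"`)
            if name == "" {
                continue
            }
            switch {
            case strings.Contains(line, "VerifyControllerAttachedVolume started"):
                phase[name] = "attached-verified"
            case strings.Contains(line, "operationExecutor.MountVolume started"):
                phase[name] = "mount-started"
            case strings.Contains(line, "MountVolume.SetUp succeeded"):
                phase[name] = "mounted"
            }
        }
        for name, p := range phase {
            fmt.Println(p, name)
        }
    }

    // extract returns the text between marker and the next escaped quote.
    func extract(line, marker string) string {
        i := strings.Index(line, marker)
        if i < 0 {
            return ""
        }
        rest := line[i+len(marker):]
        if j := strings.Index(rest, `\"`); j >= 0 {
            return rest[:j]
        }
        return ""
    }
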
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429684 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-signing-cabundle\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429723 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/231817fb-8c3c-4a4f-ad19-f019974187e7-audit-dir\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429781 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-etcd-client\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429819 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39d5b08d-6337-4abb-9e94-191b0c6ca2db-proxy-tls\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429836 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-469t8\" (UniqueName: \"kubernetes.io/projected/231962b2-f585-4eeb-9477-4fd547c803f4-kube-api-access-469t8\") pod \"migrator-59844c95c7-kd9rt\" (UID: \"231962b2-f585-4eeb-9477-4fd547c803f4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429861 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/231817fb-8c3c-4a4f-ad19-f019974187e7-audit-policies\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429874 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-image-import-ca\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429897 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwslh\" (UniqueName: \"kubernetes.io/projected/7b1451e1-776c-411e-9790-8091d11c01fd-kube-api-access-lwslh\") pod \"multus-admission-controller-857f4d67dd-vw77m\" (UID: \"7b1451e1-776c-411e-9790-8091d11c01fd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 
16:33:46.429933 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-config\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429950 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.429979 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-certificates\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430038 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-bound-sa-token\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430054 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xjxx\" (UniqueName: \"kubernetes.io/projected/d98c0234-3282-4f09-b2df-a3153363542c-kube-api-access-7xjxx\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430223 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430262 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/7bd41743-62a2-4cec-84c0-68fef9f476e4-node-pullsecrets\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430309 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98c0234-3282-4f09-b2df-a3153363542c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430327 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-config\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430353 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdzh5\" (UniqueName: \"kubernetes.io/projected/231817fb-8c3c-4a4f-ad19-f019974187e7-kube-api-access-tdzh5\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430372 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/231817fb-8c3c-4a4f-ad19-f019974187e7-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430388 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-encryption-config\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430404 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7b1451e1-776c-411e-9790-8091d11c01fd-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vw77m\" (UID: \"7b1451e1-776c-411e-9790-8091d11c01fd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430424 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430438 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430472 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98c0234-3282-4f09-b2df-a3153363542c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430528 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-signing-key\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430545 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-trusted-ca\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430560 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffzsd\" (UniqueName: \"kubernetes.io/projected/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-kube-api-access-ffzsd\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430577 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430593 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-etcd-serving-ca\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430611 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-etcd-client\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430646 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9t7b\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-kube-api-access-d9t7b\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430661 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-serving-cert\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430676 
4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-audit\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.430689 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd41743-62a2-4cec-84c0-68fef9f476e4-audit-dir\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh"
Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.431668 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:46.931654997 +0000 UTC m=+145.176259803 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.473933 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.488378 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.520577 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.535682 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-9m9xl"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.535859 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536053 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-tls\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
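
The E0120 nestedpendingoperations.go:348 entries here are the only real failures in this window: the image-registry PVC is backed by the kubevirt.io.hostpath-provisioner CSI driver, which has not yet registered with the kubelet's plugin manager, so MountVolume.MountDevice for the new image-registry-697d97f7c8-5944b pod (above) and UnmountVolume.TearDown for the old pod UID 8f668bae-... (below) both fail with "driver name ... not found in the list of registered CSI drivers" and are re-queued. The csi-hostpathplugin-hpccj pod, whose registration-dir and socket-dir host paths are being wired up in the surrounding lines, is what eventually registers the driver and clears the way for the retries. The retry schedule follows the kubelet's pending-operations exponential backoff, roughly as sketched here (the 500ms start matches the logged durationBeforeRetry; the doubling and the cap are assumptions for illustration):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        delay := 500 * time.Millisecond // matches "(durationBeforeRetry 500ms)" in the log
        maxDelay := 2 * time.Minute     // assumed cap, for illustration only
        for attempt := 1; attempt <= 8; attempt++ {
            fmt.Printf("attempt %d: no retries permitted for %v\n", attempt, delay)
            delay *= 2
            if delay > maxDelay {
                delay = maxDelay
            }
        }
    }
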
Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.536187 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.036160893 +0000 UTC m=+145.280765699 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536240 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvnfz\" (UniqueName: \"kubernetes.io/projected/7bd41743-62a2-4cec-84c0-68fef9f476e4-kube-api-access-jvnfz\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536279 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536318 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39d5b08d-6337-4abb-9e94-191b0c6ca2db-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536341 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-registration-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536362 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-serving-cert\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536377 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/231817fb-8c3c-4a4f-ad19-f019974187e7-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536407 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-csi-data-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120
16:33:46.536453 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kszjk\" (UniqueName: \"kubernetes.io/projected/39d5b08d-6337-4abb-9e94-191b0c6ca2db-kube-api-access-kszjk\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536474 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-signing-cabundle\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.536488 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/488cae6f-2862-4e11-a22d-7d39de3604c3-metrics-tls\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537133 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-mountpoint-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537164 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/488cae6f-2862-4e11-a22d-7d39de3604c3-config-volume\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537213 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/231817fb-8c3c-4a4f-ad19-f019974187e7-audit-dir\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537325 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-etcd-client\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537357 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk457\" (UniqueName: \"kubernetes.io/projected/488cae6f-2862-4e11-a22d-7d39de3604c3-kube-api-access-jk457\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537432 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39d5b08d-6337-4abb-9e94-191b0c6ca2db-proxy-tls\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537533 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-469t8\" (UniqueName: \"kubernetes.io/projected/231962b2-f585-4eeb-9477-4fd547c803f4-kube-api-access-469t8\") pod \"migrator-59844c95c7-kd9rt\" (UID: \"231962b2-f585-4eeb-9477-4fd547c803f4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537580 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/231817fb-8c3c-4a4f-ad19-f019974187e7-audit-policies\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537619 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-image-import-ca\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537684 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwslh\" (UniqueName: \"kubernetes.io/projected/7b1451e1-776c-411e-9790-8091d11c01fd-kube-api-access-lwslh\") pod \"multus-admission-controller-857f4d67dd-vw77m\" (UID: \"7b1451e1-776c-411e-9790-8091d11c01fd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537727 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-config\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.537856 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538621 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-certificates\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538685 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-bound-sa-token\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538706 4995 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-7xjxx\" (UniqueName: \"kubernetes.io/projected/d98c0234-3282-4f09-b2df-a3153363542c-kube-api-access-7xjxx\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538785 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqnkh\" (UniqueName: \"kubernetes.io/projected/b2b12051-1d9c-4eab-89ff-66cba2d03717-kube-api-access-bqnkh\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538941 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538980 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-socket-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.538998 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/7bd41743-62a2-4cec-84c0-68fef9f476e4-node-pullsecrets\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539016 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98c0234-3282-4f09-b2df-a3153363542c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539062 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-config\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539214 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b2b12051-1d9c-4eab-89ff-66cba2d03717-certs\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539249 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-tdzh5\" (UniqueName: \"kubernetes.io/projected/231817fb-8c3c-4a4f-ad19-f019974187e7-kube-api-access-tdzh5\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539277 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/231817fb-8c3c-4a4f-ad19-f019974187e7-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539539 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-encryption-config\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539567 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7b1451e1-776c-411e-9790-8091d11c01fd-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vw77m\" (UID: \"7b1451e1-776c-411e-9790-8091d11c01fd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539605 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539821 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539875 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98c0234-3282-4f09-b2df-a3153363542c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539915 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-signing-key\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539935 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b2b12051-1d9c-4eab-89ff-66cba2d03717-node-bootstrap-token\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " 
pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539970 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-plugins-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.539986 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk8xx\" (UniqueName: \"kubernetes.io/projected/a7c35d38-f660-461e-84b5-66be0efed3da-kube-api-access-sk8xx\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540091 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-trusted-ca\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540152 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffzsd\" (UniqueName: \"kubernetes.io/projected/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-kube-api-access-ffzsd\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540197 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540217 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-etcd-serving-ca\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540269 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-etcd-client\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540310 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fcda7605-c410-4f58-a935-82ca1f15c81b-cert\") pod \"ingress-canary-zd9pn\" (UID: \"fcda7605-c410-4f58-a935-82ca1f15c81b\") " pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540327 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46dh8\" (UniqueName: 
\"kubernetes.io/projected/fcda7605-c410-4f58-a935-82ca1f15c81b-kube-api-access-46dh8\") pod \"ingress-canary-zd9pn\" (UID: \"fcda7605-c410-4f58-a935-82ca1f15c81b\") " pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540391 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9t7b\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-kube-api-access-d9t7b\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540632 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-serving-cert\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540651 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-audit\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540677 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd41743-62a2-4cec-84c0-68fef9f476e4-audit-dir\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.540733 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-encryption-config\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.543978 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.545388 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.547372 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-config\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.547423 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-serving-cert\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.547469 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/231817fb-8c3c-4a4f-ad19-f019974187e7-audit-dir\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.548429 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/39d5b08d-6337-4abb-9e94-191b0c6ca2db-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.550994 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-image-import-ca\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.551445 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.051428144 +0000 UTC m=+145.296032950 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
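The MountDevice failure above repeats until the hostpath CSI driver registers with the kubelet: a CSI volume can only be mounted once the driver's node plugin has announced itself over the kubelet's plugin-registration socket, and the set of registered drivers is mirrored into the node's CSINode object. Below is a minimal Go sketch (not part of this log) that lists the drivers the kubelet currently knows about via client-go; the node name "crc" is taken from the log lines, and reading the default kubeconfig is an assumption for illustration.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig; adjust the path as needed.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The CSINode object mirrors the kubelet's list of registered CSI drivers;
	// "crc" is the node name seen in this log.
	n, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range n.Spec.Drivers {
		fmt.Println("registered CSI driver:", d.Name)
	}
}

Until kubevirt.io.hostpath-provisioner appears in that list, every MountDevice and TearDown attempt for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 will fail exactly as logged here.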
(UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-certificates\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.556838 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-audit\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.556904 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx"] Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.557165 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7bd41743-62a2-4cec-84c0-68fef9f476e4-audit-dir\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.559716 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/7b1451e1-776c-411e-9790-8091d11c01fd-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vw77m\" (UID: \"7b1451e1-776c-411e-9790-8091d11c01fd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.560602 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7bd41743-62a2-4cec-84c0-68fef9f476e4-etcd-serving-ca\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.561412 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/39d5b08d-6337-4abb-9e94-191b0c6ca2db-proxy-tls\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.561752 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-encryption-config\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.561938 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-signing-cabundle\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.566518 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-etcd-client\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.566661 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-signing-key\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.566867 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.573117 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d98c0234-3282-4f09-b2df-a3153363542c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.579210 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d98c0234-3282-4f09-b2df-a3153363542c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.580171 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-etcd-client\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: W0120 16:33:46.597234 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8c61473_94e6_460c_a307_1b2f727a24ed.slice/crio-20ffa9ca3e34f3246b2befe6dad017972a266921c3af9cf00a3a8cecd5f4608c WatchSource:0}: Error finding container 20ffa9ca3e34f3246b2befe6dad017972a266921c3af9cf00a3a8cecd5f4608c: Status 404 returned error can't find the container with id 20ffa9ca3e34f3246b2befe6dad017972a266921c3af9cf00a3a8cecd5f4608c Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.597716 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7bd41743-62a2-4cec-84c0-68fef9f476e4-serving-cert\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.598156 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-tls\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.598377 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/231817fb-8c3c-4a4f-ad19-f019974187e7-encryption-config\") pod \"apiserver-7bbb656c7d-5mmhx\" 
(UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.598389 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvnfz\" (UniqueName: \"kubernetes.io/projected/7bd41743-62a2-4cec-84c0-68fef9f476e4-kube-api-access-jvnfz\") pod \"apiserver-76f77b778f-6wxvh\" (UID: \"7bd41743-62a2-4cec-84c0-68fef9f476e4\") " pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.598928 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.599307 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7"] Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.617464 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9t7b\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-kube-api-access-d9t7b\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.625991 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwslh\" (UniqueName: \"kubernetes.io/projected/7b1451e1-776c-411e-9790-8091d11c01fd-kube-api-access-lwslh\") pod \"multus-admission-controller-857f4d67dd-vw77m\" (UID: \"7b1451e1-776c-411e-9790-8091d11c01fd\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642192 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642451 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqnkh\" (UniqueName: \"kubernetes.io/projected/b2b12051-1d9c-4eab-89ff-66cba2d03717-kube-api-access-bqnkh\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642498 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-socket-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642518 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b2b12051-1d9c-4eab-89ff-66cba2d03717-certs\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " 
pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642550 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b2b12051-1d9c-4eab-89ff-66cba2d03717-node-bootstrap-token\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642568 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-plugins-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642583 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk8xx\" (UniqueName: \"kubernetes.io/projected/a7c35d38-f660-461e-84b5-66be0efed3da-kube-api-access-sk8xx\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642609 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fcda7605-c410-4f58-a935-82ca1f15c81b-cert\") pod \"ingress-canary-zd9pn\" (UID: \"fcda7605-c410-4f58-a935-82ca1f15c81b\") " pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642623 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46dh8\" (UniqueName: \"kubernetes.io/projected/fcda7605-c410-4f58-a935-82ca1f15c81b-kube-api-access-46dh8\") pod \"ingress-canary-zd9pn\" (UID: \"fcda7605-c410-4f58-a935-82ca1f15c81b\") " pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642656 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-registration-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642670 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-csi-data-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642698 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/488cae6f-2862-4e11-a22d-7d39de3604c3-metrics-tls\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642712 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-mountpoint-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " 
pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642729 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/488cae6f-2862-4e11-a22d-7d39de3604c3-config-volume\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.642745 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk457\" (UniqueName: \"kubernetes.io/projected/488cae6f-2862-4e11-a22d-7d39de3604c3-kube-api-access-jk457\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.642938 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.142922821 +0000 UTC m=+145.387527627 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.643229 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-socket-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.647375 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b2b12051-1d9c-4eab-89ff-66cba2d03717-certs\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.657822 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-plugins-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.664539 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.665436 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-registration-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.666028 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/488cae6f-2862-4e11-a22d-7d39de3604c3-config-volume\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.666112 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-mountpoint-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.666651 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b2b12051-1d9c-4eab-89ff-66cba2d03717-node-bootstrap-token\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.666712 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a7c35d38-f660-461e-84b5-66be0efed3da-csi-data-dir\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.667954 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.673579 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fcda7605-c410-4f58-a935-82ca1f15c81b-cert\") pod \"ingress-canary-zd9pn\" (UID: \"fcda7605-c410-4f58-a935-82ca1f15c81b\") " pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.685719 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xjxx\" (UniqueName: \"kubernetes.io/projected/d98c0234-3282-4f09-b2df-a3153363542c-kube-api-access-7xjxx\") pod \"kube-storage-version-migrator-operator-b67b599dd-dwx5d\" (UID: \"d98c0234-3282-4f09-b2df-a3153363542c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.686012 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-bound-sa-token\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.687601 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" event={"ID":"c8c61473-94e6-460c-a307-1b2f727a24ed","Type":"ContainerStarted","Data":"20ffa9ca3e34f3246b2befe6dad017972a266921c3af9cf00a3a8cecd5f4608c"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.692290 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" event={"ID":"e2421e52-1edf-4403-bf98-3738f28fefa5","Type":"ContainerStarted","Data":"71bca74bc2b841c3d25e638b269290bf2510398e97fc533645862cf45124419c"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.692340 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" event={"ID":"e2421e52-1edf-4403-bf98-3738f28fefa5","Type":"ContainerStarted","Data":"d4cc8b0fbad88f6fd88aa92e2d318670bba2a6ee01376d362fb172c14a3eba66"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.694664 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" event={"ID":"b16b30f2-2578-46ab-b622-4f979193c91c","Type":"ContainerStarted","Data":"d3076a7fee1b6464e5f7db124621e623d072e701af17ec4b9bd4360f32679566"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.694747 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" event={"ID":"b16b30f2-2578-46ab-b622-4f979193c91c","Type":"ContainerStarted","Data":"e3d6d7b2dc140ddfbb7cc2cb9e0dede749f11b6c00c224436b7382a739059937"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.695855 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" event={"ID":"0b50c17b-56ad-4488-851a-2ace4d4c1184","Type":"ContainerStarted","Data":"dcc5011292acb8f3f82b41bd9a89132c9a55a48449e83c43433555f5d1a0d359"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.697800 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/downloads-7954f5f757-92c94" event={"ID":"156a6fa0-bc96-492d-88d9-6c4873dff771","Type":"ContainerStarted","Data":"34bfc0b5693112a309c4f23dcda2ab15e7d0c9343cfff89f1b44cf29652ee816"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.698016 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.698641 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kszjk\" (UniqueName: \"kubernetes.io/projected/39d5b08d-6337-4abb-9e94-191b0c6ca2db-kube-api-access-kszjk\") pod \"machine-config-controller-84d6567774-vhqdb\" (UID: \"39d5b08d-6337-4abb-9e94-191b0c6ca2db\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.699129 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" event={"ID":"9ed517c9-f353-4a74-9d38-7f5fb3c166ca","Type":"ContainerStarted","Data":"89f766c7d79787e06a94816fa2311f824bf13cf2ee3d217557bec7080f927789"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.700835 4995 generic.go:334] "Generic (PLEG): container finished" podID="00e95f8d-9e90-4c7c-8030-05f41539b15c" containerID="7af8821007c3b28c56378bd36967f00457c799b84c3d868c2134dcf349f91cca" exitCode=0 Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.700879 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" event={"ID":"00e95f8d-9e90-4c7c-8030-05f41539b15c","Type":"ContainerDied","Data":"7af8821007c3b28c56378bd36967f00457c799b84c3d868c2134dcf349f91cca"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.703238 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/488cae6f-2862-4e11-a22d-7d39de3604c3-metrics-tls\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.703261 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffzsd\" (UniqueName: \"kubernetes.io/projected/c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6-kube-api-access-ffzsd\") pod \"service-ca-9c57cc56f-62hj9\" (UID: \"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6\") " pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.704562 4995 patch_prober.go:28] interesting pod/downloads-7954f5f757-92c94 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body= Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.704612 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-92c94" podUID="156a6fa0-bc96-492d-88d9-6c4873dff771" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.708680 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" 
event={"ID":"620b0366-f02c-4d94-aceb-a0ee38453cdb","Type":"ContainerStarted","Data":"2179a847f44e079589e99e2dc517828ad8cfbbb4291405d719cb04d3477f2a5a"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.711518 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" event={"ID":"b88fed33-25aa-4de1-9a11-ddd5dc808758","Type":"ContainerStarted","Data":"63805fbef06aa5b8142d2ce484f488160e5c8d78579e6ca412d170797ae5c267"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.714003 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jzs8c" event={"ID":"2cd4e02b-cb10-4bb2-b318-d24372346b1d","Type":"ContainerStarted","Data":"99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.714053 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jzs8c" event={"ID":"2cd4e02b-cb10-4bb2-b318-d24372346b1d","Type":"ContainerStarted","Data":"fe66f518d0c883f269ac72f6f10b6bc376ed19a30ceb73f2fd7b63bf7986eeff"} Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.731613 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-469t8\" (UniqueName: \"kubernetes.io/projected/231962b2-f585-4eeb-9477-4fd547c803f4-kube-api-access-469t8\") pod \"migrator-59844c95c7-kd9rt\" (UID: \"231962b2-f585-4eeb-9477-4fd547c803f4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.744317 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.744788 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.244767671 +0000 UTC m=+145.489372567 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.749297 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.754031 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c80e6dd0-f551-4ad6-99c3-d96c50bb380b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-rdsbm\" (UID: \"c80e6dd0-f551-4ad6-99c3-d96c50bb380b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.764513 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.770331 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdzh5\" (UniqueName: \"kubernetes.io/projected/231817fb-8c3c-4a4f-ad19-f019974187e7-kube-api-access-tdzh5\") pod \"apiserver-7bbb656c7d-5mmhx\" (UID: \"231817fb-8c3c-4a4f-ad19-f019974187e7\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.779922 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.799371 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.810415 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk457\" (UniqueName: \"kubernetes.io/projected/488cae6f-2862-4e11-a22d-7d39de3604c3-kube-api-access-jk457\") pod \"dns-default-gdlqb\" (UID: \"488cae6f-2862-4e11-a22d-7d39de3604c3\") " pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.844334 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.845281 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.846633 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.346585629 +0000 UTC m=+145.591190435 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.852809 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqnkh\" (UniqueName: \"kubernetes.io/projected/b2b12051-1d9c-4eab-89ff-66cba2d03717-kube-api-access-bqnkh\") pod \"machine-config-server-k24sk\" (UID: \"b2b12051-1d9c-4eab-89ff-66cba2d03717\") " pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.862246 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk8xx\" (UniqueName: \"kubernetes.io/projected/a7c35d38-f660-461e-84b5-66be0efed3da-kube-api-access-sk8xx\") pod \"csi-hostpathplugin-hpccj\" (UID: \"a7c35d38-f660-461e-84b5-66be0efed3da\") " pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.895575 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-7wptg"] Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.899177 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46dh8\" (UniqueName: \"kubernetes.io/projected/fcda7605-c410-4f58-a935-82ca1f15c81b-kube-api-access-46dh8\") pod \"ingress-canary-zd9pn\" (UID: \"fcda7605-c410-4f58-a935-82ca1f15c81b\") " pod="openshift-ingress-canary/ingress-canary-zd9pn" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.899383 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.916274 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-k24sk" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.944427 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.946716 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:46 crc kubenswrapper[4995]: E0120 16:33:46.946997 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.446985189 +0000 UTC m=+145.691589985 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.949841 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-zd9pn"
Jan 20 16:33:46 crc kubenswrapper[4995]: I0120 16:33:46.981479 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.039309 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.048210 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.048519 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.54850341 +0000 UTC m=+145.793108216 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.065653 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.119182 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.131879 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-8dc2q"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.150372 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.150757 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.650699599 +0000 UTC m=+145.895304405 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.160220 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.162575 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t456c"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.166011 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.168669 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-55g56"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.251841 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.252298 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.752280501 +0000 UTC m=+145.996885307 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.353225 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.354089 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.854056009 +0000 UTC m=+146.098660815 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.419543 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5jck" podStartSLOduration=126.4195271 podStartE2EDuration="2m6.4195271s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:47.41811403 +0000 UTC m=+145.662718826" watchObservedRunningTime="2026-01-20 16:33:47.4195271 +0000 UTC m=+145.664131916"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.455103 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.455508 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:47.955492407 +0000 UTC m=+146.200097213 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
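
Every MountDevice and TearDownAt failure above reduces to the same lookup miss: the kubelet takes the driver name from the volume's UniqueName (the part before the "^", here kubevirt.io.hostpath-provisioner), resolves it against the set of CSI drivers that have registered with this node, and only builds the gRPC client on a hit. A minimal sketch of that lookup, with invented types and names; the real code behind newCsiDriverClient is more involved:

    package main

    import (
        "fmt"
        "strings"
        "sync"
    )

    // driverRegistry stands in for the kubelet's in-memory map of CSI drivers
    // that have announced themselves on this node. Illustrative only.
    type driverRegistry struct {
        mu      sync.RWMutex
        drivers map[string]string // driver name -> endpoint socket path
    }

    func (r *driverRegistry) client(name string) (string, error) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        ep, ok := r.drivers[name]
        if !ok {
            // Same wording the kubelet logs on every retry above.
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
        }
        return ep, nil
    }

    func main() {
        reg := &driverRegistry{drivers: map[string]string{}} // nothing registered yet
        // UniqueName format: <driver name>^<volume handle>
        unique := "kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
        driver := strings.SplitN(unique, "^", 2)[0]
        if _, err := reg.client(driver); err != nil {
            fmt.Println("MountDevice would fail:", err) // the state this log is stuck in
        }
    }

Until something populates that map, both the mount path (for the new image-registry pod) and the unmount path (for the old pod UID 8f668bae-612b-4b75-9490-919e737c6a3b) keep failing with the identical message.
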
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.497383 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-l4kss" podStartSLOduration=126.497365171 podStartE2EDuration="2m6.497365171s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:47.467546398 +0000 UTC m=+145.712151204" watchObservedRunningTime="2026-01-20 16:33:47.497365171 +0000 UTC m=+145.741969977"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.555335 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn"]
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.557550 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.557901 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.057887082 +0000 UTC m=+146.302491888 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.658565 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.658721 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.158697913 +0000 UTC m=+146.403302719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.658895 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.659288 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.15927228 +0000 UTC m=+146.403877086 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.737853 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" event={"ID":"43165ab8-b1a3-4885-88f6-bc83ef03f454","Type":"ContainerStarted","Data":"fc33b36f0507816deb12e603bb0a954e6793c5924a0dd7d01460467335d5a132"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.749271 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" event={"ID":"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2","Type":"ContainerStarted","Data":"5b8cd108819cc08ed71af5d79d35c9e1e3e242c625def435721b78febdbc47fe"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.759694 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.760034 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.260017108 +0000 UTC m=+146.504621914 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.781590 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" event={"ID":"00e95f8d-9e90-4c7c-8030-05f41539b15c","Type":"ContainerStarted","Data":"ef3f1e8a7f2aa69f5caf052fcbab86ebf501bf8c50e95785e0c2c46fb40efae8"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.782116 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.787696 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" event={"ID":"10d3852f-ae68-471d-8501-a31f353ae0cd","Type":"ContainerStarted","Data":"c03c5750fe8641bd8c6c30c1bae26f1503440d5cc9ed4afae5e79605dbf892db"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.794230 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" event={"ID":"0b50c17b-56ad-4488-851a-2ace4d4c1184","Type":"ContainerStarted","Data":"6fb70bee8221e70a5d15e35a98157425aca9ac72c10e0b03e00db9f629d342cc"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.797954 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-55g56" event={"ID":"ab7c9813-036f-4c85-82f8-389afa92ecbc","Type":"ContainerStarted","Data":"baea520ebe631ad4f1b19ae76afcc5b6042d6e1a92899efac28f6acd058c1b08"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.798641 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-rfjfc" podStartSLOduration=127.798621569 podStartE2EDuration="2m7.798621569s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:47.755227763 +0000 UTC m=+145.999832569" watchObservedRunningTime="2026-01-20 16:33:47.798621569 +0000 UTC m=+146.043226375"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.802163 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-tbsrs" podStartSLOduration=126.8021494 podStartE2EDuration="2m6.8021494s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:47.79756612 +0000 UTC m=+146.042170926" watchObservedRunningTime="2026-01-20 16:33:47.8021494 +0000 UTC m=+146.046754216"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.806881 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-k24sk" event={"ID":"b2b12051-1d9c-4eab-89ff-66cba2d03717","Type":"ContainerStarted","Data":"dec51d50d8601697c6fd3b59eaa3826609731f6970fd9a0dce376a18aded06cc"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.816660 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" event={"ID":"d87453b6-da26-4486-ac4d-cf2798843007","Type":"ContainerStarted","Data":"6be763abb9e0aebc2272b5feec965e7bd4a5a1e1d31e865ed778c937dddfc7fc"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.823640 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" event={"ID":"b897ebd4-5389-4a32-84d2-27f584c7faab","Type":"ContainerStarted","Data":"95de356c07076d9deb879daddd51c92c109a9ad64473855e7f90f41b60b42dd8"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.840998 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" event={"ID":"2a8fc6c3-ef12-4e57-a446-0cfed712d95e","Type":"ContainerStarted","Data":"dcad1e59f7803f8c1f41ddc891113131dbfd8dc7b89f435ed160280f1b03755e"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.846998 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" event={"ID":"5573e17e-4b7e-4afd-8608-e8afd1c98256","Type":"ContainerStarted","Data":"d27711f99f51ad023ef30f8a959ee8de661671aff5cf2b92d138da8e19f1abe1"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.858191 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" event={"ID":"5d649f74-8f55-43de-b6d5-5203c18f126b","Type":"ContainerStarted","Data":"1209511bef7cd592d29531a94929d338e35d609ce1c1e1ff23a3d37c7777c2df"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.858424 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" event={"ID":"5d649f74-8f55-43de-b6d5-5203c18f126b","Type":"ContainerStarted","Data":"ee73d06e6f24b3e21d024b175e0db3755e5acdc67e61d989e4049edf33622be7"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.859811 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.863577 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.864100 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.36406657 +0000 UTC m=+146.608671426 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.866428 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9m9xl" event={"ID":"9ccd2a3c-8848-42cb-96fa-a9df5a60a729","Type":"ContainerStarted","Data":"3208eaa6e5e1702b97f6e5830977fda3a2068dd812cbda172d6c7bf5268b9a25"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.866554 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-9m9xl" event={"ID":"9ccd2a3c-8848-42cb-96fa-a9df5a60a729","Type":"ContainerStarted","Data":"22a74786352de77eb4cf7d7c52ce38717d2d8a95700d9b45ac35764d7ae5995c"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.867019 4995 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-6k4z7 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body=
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.867141 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" podUID="5d649f74-8f55-43de-b6d5-5203c18f126b" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.876336 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" event={"ID":"35c557f8-2a15-4838-8042-16c5eb9ae8af","Type":"ContainerStarted","Data":"f8075de03e64d9be0c3e0b4afde258d05315500903f9fe178e74eac77b70c88d"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.886620 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" event={"ID":"c8c61473-94e6-460c-a307-1b2f727a24ed","Type":"ContainerStarted","Data":"a9465b4d4875f95c86ee702c174e411ad32f1c2e34d27f9be90c9d984680d589"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.886976 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.901758 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-jzs8c" podStartSLOduration=126.901733195 podStartE2EDuration="2m6.901733195s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:47.900942093 +0000 UTC m=+146.145546899" watchObservedRunningTime="2026-01-20 16:33:47.901733195 +0000 UTC m=+146.146338001"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.902889 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" event={"ID":"b16b30f2-2578-46ab-b622-4f979193c91c","Type":"ContainerStarted","Data":"cf5d072d7434fc3d892c7266ac90cb1785644b989df7c625cad784f0da69578e"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.908589 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" event={"ID":"9ed517c9-f353-4a74-9d38-7f5fb3c166ca","Type":"ContainerStarted","Data":"cb03c7674d488c2fa850e7c43cbdb5acf4dad82b1a2f3e48efca4a8df1562ac2"}
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.910720 4995 patch_prober.go:28] interesting pod/downloads-7954f5f757-92c94 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused" start-of-body=
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.910759 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-92c94" podUID="156a6fa0-bc96-492d-88d9-6c4873dff771" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.6:8080/\": dial tcp 10.217.0.6:8080: connect: connection refused"
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.966431 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.966606 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.466584279 +0000 UTC m=+146.711189085 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:47 crc kubenswrapper[4995]: I0120 16:33:47.967059 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:47 crc kubenswrapper[4995]: E0120 16:33:47.974652 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.474641227 +0000 UTC m=+146.719246033 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078022 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078109 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.078440 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.578408311 +0000 UTC m=+146.823013117 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078560 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078587 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078612 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078639 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.078665 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.081221 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.081522 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.581505368 +0000 UTC m=+146.826110174 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.097956 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.098509 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.108167 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.143209 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-9z4p9" podStartSLOduration=127.143186352 podStartE2EDuration="2m7.143186352s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.14239375 +0000 UTC m=+146.386998556" watchObservedRunningTime="2026-01-20 16:33:48.143186352 +0000 UTC m=+146.387791158"
observedRunningTime="2026-01-20 16:33:48.14239375 +0000 UTC m=+146.386998556" watchObservedRunningTime="2026-01-20 16:33:48.143186352 +0000 UTC m=+146.387791158" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.182733 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.183309 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.683291767 +0000 UTC m=+146.927896573 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.222105 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-92c94" podStartSLOduration=127.222067542 podStartE2EDuration="2m7.222067542s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.220603371 +0000 UTC m=+146.465208177" watchObservedRunningTime="2026-01-20 16:33:48.222067542 +0000 UTC m=+146.466672348" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.232450 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.256972 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.257991 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.285653 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.285950 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.785939429 +0000 UTC m=+147.030544235 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.317025 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r"] Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.345441 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c"] Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.407426 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.407772 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:48.907759184 +0000 UTC m=+147.152363990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.520812 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.521180 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.02116585 +0000 UTC m=+147.265770656 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.547250 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.626597 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.626955 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.126940021 +0000 UTC m=+147.371544827 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.641244 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" podStartSLOduration=127.641226115 podStartE2EDuration="2m7.641226115s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.640850134 +0000 UTC m=+146.885454940" watchObservedRunningTime="2026-01-20 16:33:48.641226115 +0000 UTC m=+146.885830911" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.669409 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:48 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:48 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:48 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.669461 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.701616 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" 
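
The router's startup probe fails differently from the connection-refused cases earlier: its endpoint is up and answering, but the healthz handler aggregates named sub-checks and returns HTTP 500 while any of them still fails, here [-]backend-http and [-]has-synced, with [+]process-running already passing. A compact aggregator in that style; the check names are copied from the probe output above, while the handler shape itself is illustrative:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
    )

    // healthz aggregates named checks the way the probe output above reads:
    // "[-]name failed" / "[+]name ok" lines, HTTP 500 while anything fails.
    // (Map iteration order is random, so line order varies run to run.)
    func healthz(checks map[string]func() error) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            failed := false
            body := ""
            for name, check := range checks {
                if err := check(); err != nil {
                    failed = true
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", name)
                }
            }
            if failed {
                w.WriteHeader(http.StatusInternalServerError) // -> "HTTP probe failed with statuscode: 500"
                body += "healthz check failed\n"
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        checks := map[string]func() error{
            "backend-http":    func() error { return fmt.Errorf("not ready") },
            "has-synced":      func() error { return fmt.Errorf("not ready") },
            "process-running": func() error { return nil },
        }
        srv := httptest.NewServer(healthz(checks))
        defer srv.Close()
        resp, _ := http.Get(srv.URL)
        out, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        fmt.Println(resp.StatusCode) // 500 while any sub-check fails
        fmt.Print(string(out))
    }
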
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.712336 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx"]
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.713997 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-62hj9"]
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.746804 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.746918 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-zhdmz" podStartSLOduration=128.746894863 podStartE2EDuration="2m8.746894863s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.746054199 +0000 UTC m=+146.990659005" watchObservedRunningTime="2026-01-20 16:33:48.746894863 +0000 UTC m=+146.991499669"
Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.747150 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.24713763 +0000 UTC m=+147.491742436 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.763830 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq"]
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.850544 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.851202 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.351180991 +0000 UTC m=+147.595785797 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.855989 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" podStartSLOduration=127.855972017 podStartE2EDuration="2m7.855972017s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.792544753 +0000 UTC m=+147.037149559" watchObservedRunningTime="2026-01-20 16:33:48.855972017 +0000 UTC m=+147.100576823"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.857859 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-9m9xl" podStartSLOduration=127.8578533 podStartE2EDuration="2m7.8578533s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.855538735 +0000 UTC m=+147.100143541" watchObservedRunningTime="2026-01-20 16:33:48.8578533 +0000 UTC m=+147.102458106"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.881066 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"]
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.881122 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-gdlqb"]
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.950966 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4vgm" podStartSLOduration=127.950944783 podStartE2EDuration="2m7.950944783s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:48.922773606 +0000 UTC m=+147.167378412" watchObservedRunningTime="2026-01-20 16:33:48.950944783 +0000 UTC m=+147.195549589"
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.952415 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-6wxvh"]
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.956461 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:33:48 crc kubenswrapper[4995]: E0120 16:33:48.956796 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.456761327 +0000 UTC m=+147.701366133 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:48 crc kubenswrapper[4995]: I0120 16:33:48.970145 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-zd9pn"]
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.026535 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-hpccj"]
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.061181 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.061759 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.561745435 +0000 UTC m=+147.806350241 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.066279 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vw77m"]
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.085194 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d"]
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.108714 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt"]
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.108742 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-55g56" event={"ID":"ab7c9813-036f-4c85-82f8-389afa92ecbc","Type":"ContainerStarted","Data":"01321f2f1417d77cdc7c2b5ffac55e4d27e93294cf9ae8940e188b5750e96abf"}
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.108760 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-55g56"
Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.122878 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" event={"ID":"43165ab8-b1a3-4885-88f6-bc83ef03f454","Type":"ContainerStarted","Data":"34843abfb0b748b41038d6bd4d097fbbf550786c4fb4b98a4cbced79167ba9a7"}
event={"ID":"43165ab8-b1a3-4885-88f6-bc83ef03f454","Type":"ContainerStarted","Data":"34843abfb0b748b41038d6bd4d097fbbf550786c4fb4b98a4cbced79167ba9a7"} Jan 20 16:33:49 crc kubenswrapper[4995]: W0120 16:33:49.122921 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39d5b08d_6337_4abb_9e94_191b0c6ca2db.slice/crio-b608d690b104479dd83aef22c21d91c18d8355fef7f04df7576b1d281d56c1b5 WatchSource:0}: Error finding container b608d690b104479dd83aef22c21d91c18d8355fef7f04df7576b1d281d56c1b5: Status 404 returned error can't find the container with id b608d690b104479dd83aef22c21d91c18d8355fef7f04df7576b1d281d56c1b5 Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.137506 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb"] Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.146516 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm"] Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.166441 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.166769 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.666757255 +0000 UTC m=+147.911362061 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.175665 4995 csr.go:261] certificate signing request csr-99l6f is approved, waiting to be issued Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.175691 4995 csr.go:257] certificate signing request csr-99l6f is issued Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.176649 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-55g56" podStartSLOduration=129.176637204 podStartE2EDuration="2m9.176637204s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.175929004 +0000 UTC m=+147.420533810" watchObservedRunningTime="2026-01-20 16:33:49.176637204 +0000 UTC m=+147.421242000" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.183699 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" event={"ID":"35c557f8-2a15-4838-8042-16c5eb9ae8af","Type":"ContainerStarted","Data":"eb1632aea4ed118aaab1c140c6678feb3bc7b65a4b62b88bc389d2aa596419b7"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.215850 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" event={"ID":"2a8fc6c3-ef12-4e57-a446-0cfed712d95e","Type":"ContainerStarted","Data":"b4df384ccf60edc369c96829c54731583511fb6ab481c63a0cf09303e8edf188"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.217045 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.223738 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" podStartSLOduration=129.223718085 podStartE2EDuration="2m9.223718085s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.206226551 +0000 UTC m=+147.450831357" watchObservedRunningTime="2026-01-20 16:33:49.223718085 +0000 UTC m=+147.468322891" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.235394 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rvmnn" podStartSLOduration=128.235380625 podStartE2EDuration="2m8.235380625s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.232603616 +0000 UTC m=+147.477208422" watchObservedRunningTime="2026-01-20 16:33:49.235380625 +0000 UTC m=+147.479985421" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.264464 4995 kubelet.go:2453] "SyncLoop 
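
The pod_startup_latency_tracker lines throughout this log can be read directly as arithmetic: podStartSLOduration is the observed running time minus podCreationTimestamp, with image-pull time excluded (zero here, since firstStartedPulling and lastFinishedPulling are both the zero time), and podStartE2EDuration is the same value rendered as a Go duration. For console-operator above, 16:33:49.176637204 minus 16:31:40 is 129.176637204s, printed as "2m9.176637204s". A check of that arithmetic and formatting, using the timestamps copied from the entry:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the console-operator entry above; the layout
        // strings match Go's default time.Time formatting.
        created, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-01-20 16:31:40 +0000 UTC")
        if err != nil {
            panic(err)
        }
        running, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2026-01-20 16:33:49.176637204 +0000 UTC")
        if err != nil {
            panic(err)
        }

        slo := running.Sub(created) // image-pull window is zero in these entries
        fmt.Println(slo.Seconds())  // 129.176637204 == podStartSLOduration
        fmt.Println(slo)            // 2m9.176637204s == podStartE2EDuration
    }
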
(PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" event={"ID":"d87453b6-da26-4486-ac4d-cf2798843007","Type":"ContainerStarted","Data":"78ed296a9420df1d848ad4afb019196a54351adc547304940015a96899c40e3b"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.264503 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" event={"ID":"d87453b6-da26-4486-ac4d-cf2798843007","Type":"ContainerStarted","Data":"b2824ed55de240beaef1bcbc8cd33e58980fefcd3ea7214c9cf3fc3fa9baa5a6"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.269878 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.271587 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.771568758 +0000 UTC m=+148.016173564 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.287667 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" podStartSLOduration=129.287652093 podStartE2EDuration="2m9.287652093s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.28650898 +0000 UTC m=+147.531113786" watchObservedRunningTime="2026-01-20 16:33:49.287652093 +0000 UTC m=+147.532256899" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.294510 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" event={"ID":"10d3852f-ae68-471d-8501-a31f353ae0cd","Type":"ContainerStarted","Data":"596f4058dd61da3eab8cce9db52d78ca2e3c0ae240565ac7b8b66d04bb34609c"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.295401 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.316378 4995 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-t456c container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.316422 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerName="marketplace-operator" 
probeResult="failure" output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.328469 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-7wptg" podStartSLOduration=128.328455946 podStartE2EDuration="2m8.328455946s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.326497092 +0000 UTC m=+147.571101898" watchObservedRunningTime="2026-01-20 16:33:49.328455946 +0000 UTC m=+147.573060752" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.329857 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-k24sk" event={"ID":"b2b12051-1d9c-4eab-89ff-66cba2d03717","Type":"ContainerStarted","Data":"60083140892adf207b650618993d4c91ba15c2e649123bc837504b9c512bc819"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.338234 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" event={"ID":"231817fb-8c3c-4a4f-ad19-f019974187e7","Type":"ContainerStarted","Data":"4065d5d4380223a1737f0399d57a890cee21be9bdcf41ecb4c4be306e3e6f67f"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.370726 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" podStartSLOduration=128.370711601 podStartE2EDuration="2m8.370711601s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.360606406 +0000 UTC m=+147.605211212" watchObservedRunningTime="2026-01-20 16:33:49.370711601 +0000 UTC m=+147.615316407" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.377293 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" event={"ID":"ebfe46d9-fd72-4faa-8d1f-cd1e491eb8b2","Type":"ContainerStarted","Data":"d54d4b2657536714438886fb468bc30b404bde703cf8a3e178e45bbf65a569d0"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.378132 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.380035 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-k24sk" podStartSLOduration=6.380026355 podStartE2EDuration="6.380026355s" podCreationTimestamp="2026-01-20 16:33:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.378482091 +0000 UTC m=+147.623086897" watchObservedRunningTime="2026-01-20 16:33:49.380026355 +0000 UTC m=+147.624631161" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.383486 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.383742 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.88373233 +0000 UTC m=+148.128337126 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.410101 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" event={"ID":"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a","Type":"ContainerStarted","Data":"59d8bae9b0aa97b8d0fba8fa744a4b3f605b0668e800c9394101e6055d0239f2"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.421743 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.435806 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" event={"ID":"9ed517c9-f353-4a74-9d38-7f5fb3c166ca","Type":"ContainerStarted","Data":"537d301219f26c27994c990858f3d9fb05186825a9c6c4fdc920d8c452515329"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.468992 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-5dkkz" podStartSLOduration=128.468978211 podStartE2EDuration="2m8.468978211s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.434867675 +0000 UTC m=+147.679472481" watchObservedRunningTime="2026-01-20 16:33:49.468978211 +0000 UTC m=+147.713583017" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.485525 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.489311 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:49.989230133 +0000 UTC m=+148.233834989 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.489657 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" event={"ID":"0b50c17b-56ad-4488-851a-2ace4d4c1184","Type":"ContainerStarted","Data":"51cc6e1352a47650e0e4a8590a2d421c6b91ff062fbfbc2faaa1f5fe33c732fd"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.495789 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" event={"ID":"010bede8-799b-47ad-88ae-3da08414f00e","Type":"ContainerStarted","Data":"e019b26ba8288cc4fa2b5108de42d43c3b9ce22b0e46cb88ffe9ffbcbf5c9a74"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.535677 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" event={"ID":"5573e17e-4b7e-4afd-8608-e8afd1c98256","Type":"ContainerStarted","Data":"bfeff12175a0ead2dd08f27a7befb55d18a4f6df91b42268c0cf60564a1bb12a"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.543116 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" event={"ID":"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6","Type":"ContainerStarted","Data":"aa13765dc81b04c8aa6024bfe0cc7ace1307cff46cb8ddb5a6430e8c966da2b1"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.566934 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:49 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:49 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:49 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.567011 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.571011 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" event={"ID":"b897ebd4-5389-4a32-84d2-27f584c7faab","Type":"ContainerStarted","Data":"235bd71c783b5921230f97b7442eb924368bbacfe4f101da62cdee1302d17111"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.571052 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" event={"ID":"b897ebd4-5389-4a32-84d2-27f584c7faab","Type":"ContainerStarted","Data":"8ee3d0c023d597f43d8eef500f63a68eca4bb4b6a6af78127bc02035e5423b48"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.571747 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.576696 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-h4lnx" podStartSLOduration=128.576680665 podStartE2EDuration="2m8.576680665s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.524228292 +0000 UTC m=+147.768833108" watchObservedRunningTime="2026-01-20 16:33:49.576680665 +0000 UTC m=+147.821285481" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.576946 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" podStartSLOduration=128.576942753 podStartE2EDuration="2m8.576942753s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.571380916 +0000 UTC m=+147.815985722" watchObservedRunningTime="2026-01-20 16:33:49.576942753 +0000 UTC m=+147.821547559" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.590230 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.593116 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" event={"ID":"b629e2c3-ed8f-4f3e-af06-ec30249e2af7","Type":"ContainerStarted","Data":"0c0ef1dc82db0a01b77b34751adbaf0e14ed5ad25af1644abeb0b4c896ca6651"} Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.593461 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" event={"ID":"b629e2c3-ed8f-4f3e-af06-ec30249e2af7","Type":"ContainerStarted","Data":"c6ae2cab93e143e4e01ceaf739768284cfd16839be43e5339c91e59d29f7dca0"} Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.601693 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.101678162 +0000 UTC m=+148.346282958 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.603017 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f5x87" podStartSLOduration=130.603002499 podStartE2EDuration="2m10.603002499s" podCreationTimestamp="2026-01-20 16:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.601381994 +0000 UTC m=+147.845986800" watchObservedRunningTime="2026-01-20 16:33:49.603002499 +0000 UTC m=+147.847607305" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.624571 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-6k4z7" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.636376 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-xxvtq" podStartSLOduration=128.636358603 podStartE2EDuration="2m8.636358603s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.635927651 +0000 UTC m=+147.880532457" watchObservedRunningTime="2026-01-20 16:33:49.636358603 +0000 UTC m=+147.880963409" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.678931 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" podStartSLOduration=129.678911177 podStartE2EDuration="2m9.678911177s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.671385964 +0000 UTC m=+147.915990780" watchObservedRunningTime="2026-01-20 16:33:49.678911177 +0000 UTC m=+147.923515983" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.691306 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.693831 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.193811668 +0000 UTC m=+148.438416474 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.762596 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6" podStartSLOduration=128.762579251 podStartE2EDuration="2m8.762579251s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:49.715763859 +0000 UTC m=+147.960368665" watchObservedRunningTime="2026-01-20 16:33:49.762579251 +0000 UTC m=+148.007184057" Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.793895 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.795259 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.295246816 +0000 UTC m=+148.539851612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.911487 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:49 crc kubenswrapper[4995]: E0120 16:33:49.911989 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.411975526 +0000 UTC m=+148.656580332 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:49 crc kubenswrapper[4995]: I0120 16:33:49.970430 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-55g56" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.023697 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.024004 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.523992814 +0000 UTC m=+148.768597620 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.127514 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.127785 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.627763478 +0000 UTC m=+148.872368284 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.146497 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.191839 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-20 16:28:49 +0000 UTC, rotation deadline is 2026-10-26 03:31:43.641424801 +0000 UTC Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.192107 4995 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6682h57m53.449320284s for next certificate rotation Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.228752 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.229034 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.729022881 +0000 UTC m=+148.973627687 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.331727 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.331942 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.831921981 +0000 UTC m=+149.076526787 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.358537 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5f7mb" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.436902 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.437380 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:50.937368972 +0000 UTC m=+149.181973778 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.541788 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.542058 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.042043282 +0000 UTC m=+149.286648088 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.553429 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:50 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:50 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:50 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.553482 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.577475 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nmxgc"] Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.578346 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.583399 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.587633 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nmxgc"] Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.643370 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t9wt\" (UniqueName: \"kubernetes.io/projected/b57345df-b284-4e63-b77d-f60534099876-kube-api-access-9t9wt\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.643629 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-utilities\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.643653 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-catalog-content\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.643674 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") 
pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.643940 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.143930583 +0000 UTC m=+149.388535389 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.703146 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" event={"ID":"0b77c9d0-1343-4fd0-992a-bd2a4517bf0a","Type":"ContainerStarted","Data":"b8563f2c56b8d91274171dc9689788ae0e94052814bd31bdc08d412156d5f5b9"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.725439 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" event={"ID":"c80e6dd0-f551-4ad6-99c3-d96c50bb380b","Type":"ContainerStarted","Data":"6b7f1866b7b14d494a9d343b9aabd1889c3ef1a47a68c5332b3ef028cbffb2a6"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.736205 4995 generic.go:334] "Generic (PLEG): container finished" podID="231817fb-8c3c-4a4f-ad19-f019974187e7" containerID="672d72d363afc8998ec6cf0617d1b232c2a2c3dd7d44727ae967827a5c1e25b4" exitCode=0 Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.736278 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" event={"ID":"231817fb-8c3c-4a4f-ad19-f019974187e7","Type":"ContainerDied","Data":"672d72d363afc8998ec6cf0617d1b232c2a2c3dd7d44727ae967827a5c1e25b4"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.744096 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.744330 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t9wt\" (UniqueName: \"kubernetes.io/projected/b57345df-b284-4e63-b77d-f60534099876-kube-api-access-9t9wt\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.744360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-utilities\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.744384 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-catalog-content\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.744750 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-catalog-content\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.745410 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.245388971 +0000 UTC m=+149.489993847 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.745988 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-utilities\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.757364 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9jh8c" event={"ID":"010bede8-799b-47ad-88ae-3da08414f00e","Type":"ContainerStarted","Data":"bf33a2da87dd2c2d493f2d9632eb3c0f045da0236eb5fdc59b7412bab0157221"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.760254 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-bcs2r" podStartSLOduration=129.760239032 podStartE2EDuration="2m9.760239032s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:50.753584223 +0000 UTC m=+148.998189029" watchObservedRunningTime="2026-01-20 16:33:50.760239032 +0000 UTC m=+149.004843838" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.775690 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w9bjs"] Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.776598 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.778039 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" event={"ID":"c9fb87c2-dc7e-4c40-bdfd-3b460d7770f6","Type":"ContainerStarted","Data":"7e713f4185ae7c3017869a344a28218814c4a288d13490b65431db33499907fd"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.793492 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.798857 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w9bjs"] Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.806194 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t9wt\" (UniqueName: \"kubernetes.io/projected/b57345df-b284-4e63-b77d-f60534099876-kube-api-access-9t9wt\") pod \"community-operators-nmxgc\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.853536 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.855273 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.355257848 +0000 UTC m=+149.599862654 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.860926 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gdlqb" event={"ID":"488cae6f-2862-4e11-a22d-7d39de3604c3","Type":"ContainerStarted","Data":"1eb828273d1234b2699e07da7e59ce755de5604f6ac9ec28913c9a6e70c80fec"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.860986 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gdlqb" event={"ID":"488cae6f-2862-4e11-a22d-7d39de3604c3","Type":"ContainerStarted","Data":"c3f329bc8ce0d345033dfbf944aedde108b25246b1ab8c564511c1e48cf23ff7"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.866790 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-62hj9" podStartSLOduration=129.866778214 podStartE2EDuration="2m9.866778214s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:50.865709543 +0000 UTC m=+149.110314369" watchObservedRunningTime="2026-01-20 16:33:50.866778214 +0000 UTC m=+149.111383020" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.882705 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"cf8ca77652ebf1de0449eff0e7d005f43a88d50098eed576f206bc749aa6b3e8"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.882744 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f48e3d34d651fb1f5053faac196ed0bce02bb822b7f94019861449308921def7"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.906128 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" event={"ID":"a0d62048-3df5-4737-adc3-5544e1402f06","Type":"ContainerStarted","Data":"d93c17ca4aed91a097a31acb389998fa5ef17daa12c846eb0284383d8c716e42"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.906176 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" event={"ID":"a0d62048-3df5-4737-adc3-5544e1402f06","Type":"ContainerStarted","Data":"ec953025fceb01c7f8b1cd81be63b574a21f9e8fb7496c8e64f02e63241d30aa"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.907033 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.931597 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" 
event={"ID":"b01271cc-9d58-4948-9a83-564b481d8eff","Type":"ContainerStarted","Data":"75f1ffb09a0872c3639a92f366a93bf05c8fda5fe5ee4a3f4d14ea9c3a7c1fa9"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.931656 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" event={"ID":"b01271cc-9d58-4948-9a83-564b481d8eff","Type":"ContainerStarted","Data":"3b6435920902e52f509f71b7482fbb7ce0ab754ac5375ff4fdc282e330938fd6"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.932734 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.946011 4995 generic.go:334] "Generic (PLEG): container finished" podID="7bd41743-62a2-4cec-84c0-68fef9f476e4" containerID="709b95f48729b4b0a70f6ed48d87084f0ec7de053c333b6ca684100b53e995a6" exitCode=0 Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.947493 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" event={"ID":"7bd41743-62a2-4cec-84c0-68fef9f476e4","Type":"ContainerDied","Data":"709b95f48729b4b0a70f6ed48d87084f0ec7de053c333b6ca684100b53e995a6"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.947529 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" event={"ID":"7bd41743-62a2-4cec-84c0-68fef9f476e4","Type":"ContainerStarted","Data":"62f9aad070b8c228967c506ef18846e55d44b5dd76fb4e99b4dba1da0329405b"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.951182 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" event={"ID":"a7c35d38-f660-461e-84b5-66be0efed3da","Type":"ContainerStarted","Data":"2ce2beb797447e1a73673e61436fca87c94e8f0ff6c88183bb85370d341f14d6"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.951217 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" event={"ID":"a7c35d38-f660-461e-84b5-66be0efed3da","Type":"ContainerStarted","Data":"a23f06eb98c0eead547f2593e2162a98e3342182444a376eccf09be3d64432b1"} Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.966572 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.967321 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.967825 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.467810311 +0000 UTC m=+149.712415117 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.967860 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-utilities\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.967933 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcjdk\" (UniqueName: \"kubernetes.io/projected/7087f2d6-d879-419d-bd93-538d617dcc91-kube-api-access-zcjdk\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.967983 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-catalog-content\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.968212 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.968552 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" podStartSLOduration=129.968541111 podStartE2EDuration="2m9.968541111s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:50.966706519 +0000 UTC m=+149.211311325" watchObservedRunningTime="2026-01-20 16:33:50.968541111 +0000 UTC m=+149.213145927" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.971090 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-59fd5"] Jan 20 16:33:50 crc kubenswrapper[4995]: E0120 16:33:50.971571 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.471553807 +0000 UTC m=+149.716158603 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.971969 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:50 crc kubenswrapper[4995]: I0120 16:33:50.985292 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.009401 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-zd9pn" event={"ID":"fcda7605-c410-4f58-a935-82ca1f15c81b","Type":"ContainerStarted","Data":"3d32b4a0cb62ad06b0b7d9040425ca956be78b7b0576c636762a15b25129f11e"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.009443 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-zd9pn" event={"ID":"fcda7605-c410-4f58-a935-82ca1f15c81b","Type":"ContainerStarted","Data":"0b60147eb45cc998042374bccc758676bc60c1cf948c28a61559423f11360d9c"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.012499 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59fd5"] Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.024310 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" event={"ID":"39d5b08d-6337-4abb-9e94-191b0c6ca2db","Type":"ContainerStarted","Data":"c4f1514a89812c0878768ac46a68b6a267f779d7be63ff07a962a825e010cee3"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.024352 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" event={"ID":"39d5b08d-6337-4abb-9e94-191b0c6ca2db","Type":"ContainerStarted","Data":"0e86970dd7cfd086c27a76a36f7da78041486beb13833d783ecf584f4bd6104b"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.024362 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" event={"ID":"39d5b08d-6337-4abb-9e94-191b0c6ca2db","Type":"ContainerStarted","Data":"b608d690b104479dd83aef22c21d91c18d8355fef7f04df7576b1d281d56c1b5"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.070580 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.070708 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcjdk\" (UniqueName: \"kubernetes.io/projected/7087f2d6-d879-419d-bd93-538d617dcc91-kube-api-access-zcjdk\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc 
kubenswrapper[4995]: I0120 16:33:51.070735 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-catalog-content\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.070817 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-utilities\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.071431 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-utilities\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.071976 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.571962776 +0000 UTC m=+149.816567582 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.072532 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-catalog-content\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.086209 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" event={"ID":"231962b2-f585-4eeb-9477-4fd547c803f4","Type":"ContainerStarted","Data":"7606ef42c2a9d41bdf697e85f218ae469d01bd407eb76a29878220726461850a"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.086275 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" event={"ID":"231962b2-f585-4eeb-9477-4fd547c803f4","Type":"ContainerStarted","Data":"35407136b2cf1ccab1ae6ab684fdbda4ede1494b99f3f936b269e81f87c8358d"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.115404 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" podStartSLOduration=130.115388923 podStartE2EDuration="2m10.115388923s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:51.051902419 +0000 
UTC m=+149.296507225" watchObservedRunningTime="2026-01-20 16:33:51.115388923 +0000 UTC m=+149.359993729" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.120129 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"407b5d8ce3e72aaf5f501a453d2bb85e1b8c0c37924553d40ec8ccd4c22c7c06"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.120168 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"c1c18b4ead373cd1a22689aa32a0d93497e1b0e84d5f17b3f4936beaa2ec2de5"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.120367 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.137803 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcjdk\" (UniqueName: \"kubernetes.io/projected/7087f2d6-d879-419d-bd93-538d617dcc91-kube-api-access-zcjdk\") pod \"certified-operators-w9bjs\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.146331 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpgkb" event={"ID":"b629e2c3-ed8f-4f3e-af06-ec30249e2af7","Type":"ContainerStarted","Data":"00aa8fba476c340e992274cd0c01d09e936c5998e68a0740210938725c802b12"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.168666 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"2f17dead2eba93bf5de5a7eb64a70de126367505101572519658d45d700ac9a8"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.168703 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"4f457527a701b9ddb081696cd3958c9fe7db2e867881724aa93a40ec536a5088"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.169110 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-brxnm"] Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.170253 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182059 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22dg9\" (UniqueName: \"kubernetes.io/projected/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-kube-api-access-22dg9\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182146 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-catalog-content\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182224 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182247 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrc46\" (UniqueName: \"kubernetes.io/projected/f438d3ed-cdb7-438d-ba13-6ac749c18dea-kube-api-access-mrc46\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182329 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-utilities\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182359 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-catalog-content\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.182397 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-utilities\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.184304 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.684289782 +0000 UTC m=+149.928894588 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.205341 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" event={"ID":"7b1451e1-776c-411e-9790-8091d11c01fd","Type":"ContainerStarted","Data":"74bd3145de6972f4c4496285e4187e00877d7d1f08615b8f186398014f58533e"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.205382 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" event={"ID":"7b1451e1-776c-411e-9790-8091d11c01fd","Type":"ContainerStarted","Data":"5403f40e31bb6985914a0f45f81f915384ee09edfe2473e80fb211ad11c35191"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.207796 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-brxnm"] Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.214971 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" event={"ID":"d98c0234-3282-4f09-b2df-a3153363542c","Type":"ContainerStarted","Data":"2a7190650ace398d7d512ecdffa76517dfa1dbffd2261424c4e25ef8e62b8130"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.215005 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" event={"ID":"d98c0234-3282-4f09-b2df-a3153363542c","Type":"ContainerStarted","Data":"232b66bcad736a6bba15e15edb18f682b967245a88e124765bdff9c732c7c9ab"} Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.224037 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.265744 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vhqdb" podStartSLOduration=130.265722824 podStartE2EDuration="2m10.265722824s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:51.263559743 +0000 UTC m=+149.508164559" watchObservedRunningTime="2026-01-20 16:33:51.265722824 +0000 UTC m=+149.510327630" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.285649 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.285720 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" 
failed. No retries permitted until 2026-01-20 16:33:51.785706339 +0000 UTC m=+150.030311145 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.289870 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-utilities\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.289939 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-catalog-content\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.290005 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-utilities\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.290042 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22dg9\" (UniqueName: \"kubernetes.io/projected/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-kube-api-access-22dg9\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.290058 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-catalog-content\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.290386 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.290461 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrc46\" (UniqueName: \"kubernetes.io/projected/f438d3ed-cdb7-438d-ba13-6ac749c18dea-kube-api-access-mrc46\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.290904 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-ingress-canary/ingress-canary-zd9pn" podStartSLOduration=8.290895406 podStartE2EDuration="8.290895406s" podCreationTimestamp="2026-01-20 16:33:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:51.287915632 +0000 UTC m=+149.532520458" watchObservedRunningTime="2026-01-20 16:33:51.290895406 +0000 UTC m=+149.535500202" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.291739 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-utilities\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.296160 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-catalog-content\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.300915 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.80090237 +0000 UTC m=+150.045507176 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.301263 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-utilities\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.302410 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-catalog-content\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.316748 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22dg9\" (UniqueName: \"kubernetes.io/projected/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-kube-api-access-22dg9\") pod \"certified-operators-brxnm\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.322191 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrc46\" (UniqueName: \"kubernetes.io/projected/f438d3ed-cdb7-438d-ba13-6ac749c18dea-kube-api-access-mrc46\") pod \"community-operators-59fd5\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " 
pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.337611 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.367952 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dwx5d" podStartSLOduration=130.367937664 podStartE2EDuration="2m10.367937664s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:51.337389471 +0000 UTC m=+149.581994277" watchObservedRunningTime="2026-01-20 16:33:51.367937664 +0000 UTC m=+149.612542470" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.391855 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.392285 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.892269972 +0000 UTC m=+150.136874778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.402961 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.492704 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.493201 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:51.993185746 +0000 UTC m=+150.237790552 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.539682 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.542456 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:51 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:51 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:51 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.542504 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.593832 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.594143 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.09412787 +0000 UTC m=+150.338732676 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.694943 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.695258 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.195245269 +0000 UTC m=+150.439850075 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.731961 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t8rpq" Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.796185 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.796595 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.296577454 +0000 UTC m=+150.541182260 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.897969 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:51 crc kubenswrapper[4995]: E0120 16:33:51.898373 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.398363193 +0000 UTC m=+150.642967999 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.914206 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w9bjs"] Jan 20 16:33:51 crc kubenswrapper[4995]: W0120 16:33:51.968256 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7087f2d6_d879_419d_bd93_538d617dcc91.slice/crio-b05e2a30b27c31c5276f3d98e49c4bb30339518122a37efbb4ca4f9d078c3458 WatchSource:0}: Error finding container b05e2a30b27c31c5276f3d98e49c4bb30339518122a37efbb4ca4f9d078c3458: Status 404 returned error can't find the container with id b05e2a30b27c31c5276f3d98e49c4bb30339518122a37efbb4ca4f9d078c3458 Jan 20 16:33:51 crc kubenswrapper[4995]: I0120 16:33:51.987236 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nmxgc"] Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:51.999527 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:51.999884 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.499870493 +0000 UTC m=+150.744475299 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.044018 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-brxnm"] Jan 20 16:33:52 crc kubenswrapper[4995]: W0120 16:33:52.081108 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d61ecd0_1a0d_4e90_9ab6_49d15d546f6c.slice/crio-db88e9f8590b6b4477cf2b2205804482fd541082af8781e40f7a4817754984b7 WatchSource:0}: Error finding container db88e9f8590b6b4477cf2b2205804482fd541082af8781e40f7a4817754984b7: Status 404 returned error can't find the container with id db88e9f8590b6b4477cf2b2205804482fd541082af8781e40f7a4817754984b7 Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.100657 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.100944 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.60093383 +0000 UTC m=+150.845538636 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.208237 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.208580 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.708564844 +0000 UTC m=+150.953169640 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.223479 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" event={"ID":"c80e6dd0-f551-4ad6-99c3-d96c50bb380b","Type":"ContainerStarted","Data":"61eef27d34f03e610e26b9722cb9c228b9efd11f1c59636453ffd1eba4504fae"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.232947 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" event={"ID":"231817fb-8c3c-4a4f-ad19-f019974187e7","Type":"ContainerStarted","Data":"8e8cb72fcc0cc65323d953449b5e7a6e8f3616921e3022158219324028b4aa65"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.240996 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-rdsbm" podStartSLOduration=131.24097892 podStartE2EDuration="2m11.24097892s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:52.238890491 +0000 UTC m=+150.483495297" watchObservedRunningTime="2026-01-20 16:33:52.24097892 +0000 UTC m=+150.485583726" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.259336 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-gdlqb" event={"ID":"488cae6f-2862-4e11-a22d-7d39de3604c3","Type":"ContainerStarted","Data":"41d96e475b0e8e89f6d15db2a5440984e4691fae057091d2e1e8e31e56d2b836"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.259961 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-gdlqb" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.263028 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" podStartSLOduration=131.263019114 podStartE2EDuration="2m11.263019114s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:52.261379137 +0000 UTC m=+150.505983943" watchObservedRunningTime="2026-01-20 16:33:52.263019114 +0000 UTC m=+150.507623910" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.281040 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59fd5"] Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.286553 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" event={"ID":"7bd41743-62a2-4cec-84c0-68fef9f476e4","Type":"ContainerStarted","Data":"44f5cabca7abb5d5e82dc76358c975b6da1bf44d0c891136ba77fe131bba03aa"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.291098 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-gdlqb" podStartSLOduration=9.291072237 
podStartE2EDuration="9.291072237s" podCreationTimestamp="2026-01-20 16:33:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:52.285210211 +0000 UTC m=+150.529815007" watchObservedRunningTime="2026-01-20 16:33:52.291072237 +0000 UTC m=+150.535677043" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.303812 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w9bjs" event={"ID":"7087f2d6-d879-419d-bd93-538d617dcc91","Type":"ContainerStarted","Data":"b05e2a30b27c31c5276f3d98e49c4bb30339518122a37efbb4ca4f9d078c3458"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.315560 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.316587 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.816571208 +0000 UTC m=+151.061176014 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.320010 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" event={"ID":"7b1451e1-776c-411e-9790-8091d11c01fd","Type":"ContainerStarted","Data":"3b3306332e8d7f35d7313476623aea872bfd3652f8d839ae95fc56ce47aaca51"} Jan 20 16:33:52 crc kubenswrapper[4995]: W0120 16:33:52.346324 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf438d3ed_cdb7_438d_ba13_6ac749c18dea.slice/crio-185d75aef69bbdeebdf6279ce92a149c3f1b4377a6a77a6d1cd5fcca476d32dd WatchSource:0}: Error finding container 185d75aef69bbdeebdf6279ce92a149c3f1b4377a6a77a6d1cd5fcca476d32dd: Status 404 returned error can't find the container with id 185d75aef69bbdeebdf6279ce92a149c3f1b4377a6a77a6d1cd5fcca476d32dd Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.352921 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-brxnm" event={"ID":"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c","Type":"ContainerStarted","Data":"db88e9f8590b6b4477cf2b2205804482fd541082af8781e40f7a4817754984b7"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.354245 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-vw77m" podStartSLOduration=131.354236493 podStartE2EDuration="2m11.354236493s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:52.352434782 +0000 UTC m=+150.597039588" watchObservedRunningTime="2026-01-20 16:33:52.354236493 +0000 UTC m=+150.598841299" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.358442 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.367473 4995 generic.go:334] "Generic (PLEG): container finished" podID="43165ab8-b1a3-4885-88f6-bc83ef03f454" containerID="34843abfb0b748b41038d6bd4d097fbbf550786c4fb4b98a4cbced79167ba9a7" exitCode=0 Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.367536 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" event={"ID":"43165ab8-b1a3-4885-88f6-bc83ef03f454","Type":"ContainerDied","Data":"34843abfb0b748b41038d6bd4d097fbbf550786c4fb4b98a4cbced79167ba9a7"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.377095 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" event={"ID":"231962b2-f585-4eeb-9477-4fd547c803f4","Type":"ContainerStarted","Data":"d4b793f920148a66e05a01edf65f87ac9e2e72825465c48382b319347f9cf078"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.400377 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nmxgc" event={"ID":"b57345df-b284-4e63-b77d-f60534099876","Type":"ContainerStarted","Data":"908d0169fc8074d8798d93527fa5a71467ed74dc5cca5cabcf16f5a03ba0f7a0"} Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.416914 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.417445 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:52.917420969 +0000 UTC m=+151.162025775 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.469712 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-kd9rt" podStartSLOduration=131.469693478 podStartE2EDuration="2m11.469693478s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:52.435567673 +0000 UTC m=+150.680172479" watchObservedRunningTime="2026-01-20 16:33:52.469693478 +0000 UTC m=+150.714298274" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.518622 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.519628 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.019612549 +0000 UTC m=+151.264217415 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.542326 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:52 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:52 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:52 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.542494 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.619884 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.620223 4995 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.620227 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.120189392 +0000 UTC m=+151.364794198 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.620301 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.620584 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-20 16:33:53.120572494 +0000 UTC m=+151.365177300 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.721484 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.721817 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.221799826 +0000 UTC m=+151.466404632 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.750838 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bq48v"] Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.751903 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.761174 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.767195 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq48v"] Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.823108 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p86s\" (UniqueName: \"kubernetes.io/projected/85820e5b-3be3-43a1-a954-7ce3719e24b5-kube-api-access-4p86s\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.823180 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.823203 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-utilities\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.823386 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-catalog-content\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.823439 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.323427419 +0000 UTC m=+151.568032225 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.924600 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.924762 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-catalog-content\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.924798 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p86s\" (UniqueName: \"kubernetes.io/projected/85820e5b-3be3-43a1-a954-7ce3719e24b5-kube-api-access-4p86s\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.924845 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-utilities\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.925301 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-utilities\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: E0120 16:33:52.925372 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.425358551 +0000 UTC m=+151.669963357 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.925563 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-catalog-content\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:52 crc kubenswrapper[4995]: I0120 16:33:52.957799 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p86s\" (UniqueName: \"kubernetes.io/projected/85820e5b-3be3-43a1-a954-7ce3719e24b5-kube-api-access-4p86s\") pod \"redhat-marketplace-bq48v\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.026615 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:53 crc kubenswrapper[4995]: E0120 16:33:53.026946 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.526931814 +0000 UTC m=+151.771536620 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5944b" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.067038 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.128628 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:53 crc kubenswrapper[4995]: E0120 16:33:53.129172 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:33:53.629153724 +0000 UTC m=+151.873758530 (durationBeforeRetry 500ms). 
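The two failure flavors above are a startup ordering race rather than a persistent fault: pod "image-registry-697d97f7c8-5944b" needs its PVC mounted, and the orphaned pod "8f668bae-612b-4b75-9490-919e737c6a3b" needs the same volume torn down, but the kubevirt.io.hostpath-provisioner plugin has not yet registered its socket with the kubelet, so both operations are rejected and re-queued with durationBeforeRetry 500ms. A few records below, the plugin registers (csi_plugin.go "Register new plugin ..."), after which MountDevice is skipped outright because the driver does not advertise STAGE_UNSTAGE_VOLUME, and both SetUp and TearDown succeed. A minimal, self-contained Go sketch of that retry-until-registered pattern follows; it is illustrative only, not kubelet source, and registerDriver, mountDevice, and the simulated 1s registration delay are hypothetical.

// Sketch: an operation fails while a CSI driver is unregistered and is
// retried with a growing backoff, as in the records above. Hypothetical code.
package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	mu      sync.RWMutex
	drivers = map[string]bool{} // names of registered CSI drivers
)

func registerDriver(name string) {
	mu.Lock()
	defer mu.Unlock()
	drivers[name] = true
}

func mountDevice(driver string) error {
	mu.RLock()
	defer mu.RUnlock()
	if !drivers[driver] {
		return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
	}
	return nil // a real MountDevice would stage the volume here
}

func main() {
	const driver = "kubevirt.io.hostpath-provisioner"

	// Simulate the plugin registering ~1s after the first attempt, matching
	// the failures at 16:33:52 and the registration at 16:33:53 above.
	go func() {
		time.Sleep(1 * time.Second)
		registerDriver(driver)
	}()

	backoff := 500 * time.Millisecond // the log's initial durationBeforeRetry
	for {
		err := mountDevice(driver)
		if err == nil {
			fmt.Println("MountVolume.MountDevice succeeded")
			return
		}
		fmt.Printf("MountDevice failed: %v; retrying in %v\n", err, backoff)
		time.Sleep(backoff)
		backoff *= 2 // repeated failures back off exponentially
	}
}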
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.146583 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-46s79"] Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.147697 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.148274 4995 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-20T16:33:52.620240934Z","Handler":null,"Name":""} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.162243 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s79"] Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.177297 4995 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.177353 4995 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.233792 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxmqb\" (UniqueName: \"kubernetes.io/projected/58893b4f-0622-48ee-bc1d-24ed2b499606-kube-api-access-qxmqb\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.234046 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.234102 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-utilities\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.234140 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-catalog-content\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.244384 4995 csi_attacher.go:380] 
kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.244430 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.299882 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5944b\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.335279 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.335433 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-utilities\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.335488 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-catalog-content\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.335514 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxmqb\" (UniqueName: \"kubernetes.io/projected/58893b4f-0622-48ee-bc1d-24ed2b499606-kube-api-access-qxmqb\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.336265 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-utilities\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.336356 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-catalog-content\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc 
kubenswrapper[4995]: I0120 16:33:53.346036 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.357198 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxmqb\" (UniqueName: \"kubernetes.io/projected/58893b4f-0622-48ee-bc1d-24ed2b499606-kube-api-access-qxmqb\") pod \"redhat-marketplace-46s79\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.382658 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq48v"] Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.414414 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" event={"ID":"7bd41743-62a2-4cec-84c0-68fef9f476e4","Type":"ContainerStarted","Data":"5ee10fa5e2c8a5d01c6b7360b63730f133075759eb6bb60d12f537fa9e990e3f"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.423595 4995 generic.go:334] "Generic (PLEG): container finished" podID="7087f2d6-d879-419d-bd93-538d617dcc91" containerID="fc9cd0d4b1c1cc00110675615b53d9bbd17ada7cfdb7e49584e80ddbfa40f31a" exitCode=0 Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.423701 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w9bjs" event={"ID":"7087f2d6-d879-419d-bd93-538d617dcc91","Type":"ContainerDied","Data":"fc9cd0d4b1c1cc00110675615b53d9bbd17ada7cfdb7e49584e80ddbfa40f31a"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.428684 4995 generic.go:334] "Generic (PLEG): container finished" podID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerID="575d6e2443d123c6e77456581fa83c2258462c479142e8fa30425857f268a19e" exitCode=0 Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.428730 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-brxnm" event={"ID":"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c","Type":"ContainerDied","Data":"575d6e2443d123c6e77456581fa83c2258462c479142e8fa30425857f268a19e"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.434373 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq48v" event={"ID":"85820e5b-3be3-43a1-a954-7ce3719e24b5","Type":"ContainerStarted","Data":"e16b43853f8629a9b0655c052979626641d6decaff012ddc2f1fb459994e452e"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.438367 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" podStartSLOduration=133.438353147 podStartE2EDuration="2m13.438353147s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:53.435474065 +0000 UTC m=+151.680078871" watchObservedRunningTime="2026-01-20 16:33:53.438353147 +0000 UTC m=+151.682957943" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.439783 4995 generic.go:334] "Generic (PLEG): container 
finished" podID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerID="f55fb70576eb5594a3e573f7efaf839622bf1b3788f3236de9dc1d282c194e7a" exitCode=0 Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.439868 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerDied","Data":"f55fb70576eb5594a3e573f7efaf839622bf1b3788f3236de9dc1d282c194e7a"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.439893 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerStarted","Data":"185d75aef69bbdeebdf6279ce92a149c3f1b4377a6a77a6d1cd5fcca476d32dd"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.442902 4995 generic.go:334] "Generic (PLEG): container finished" podID="b57345df-b284-4e63-b77d-f60534099876" containerID="e94f307c6deeaf0dca03c62c6ee702a15e7971894a14ac597f10e6abf8a5fa36" exitCode=0 Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.442964 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nmxgc" event={"ID":"b57345df-b284-4e63-b77d-f60534099876","Type":"ContainerDied","Data":"e94f307c6deeaf0dca03c62c6ee702a15e7971894a14ac597f10e6abf8a5fa36"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.456748 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" event={"ID":"a7c35d38-f660-461e-84b5-66be0efed3da","Type":"ContainerStarted","Data":"5d41e93cf0598c0de8a292b8d57c3ae9acf7b3a2677f9cef6f7ec3026b1b10da"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.456783 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" event={"ID":"a7c35d38-f660-461e-84b5-66be0efed3da","Type":"ContainerStarted","Data":"06afe7e6a52131e0984a0c4e421124cbaae23b070b58df23dc5abe5a8187e803"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.456792 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" event={"ID":"a7c35d38-f660-461e-84b5-66be0efed3da","Type":"ContainerStarted","Data":"58aee282019f83af41e95ad2ed88151985574a5b81ea37a2a2e7195de22716f1"} Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.467597 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.506672 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-hpccj" podStartSLOduration=10.506648608 podStartE2EDuration="10.506648608s" podCreationTimestamp="2026-01-20 16:33:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:53.505380972 +0000 UTC m=+151.749985778" watchObservedRunningTime="2026-01-20 16:33:53.506648608 +0000 UTC m=+151.751253414" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.543819 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:53 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:53 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:53 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.543903 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.577856 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.721689 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.765296 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-z4tw2"] Jan 20 16:33:53 crc kubenswrapper[4995]: E0120 16:33:53.766899 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43165ab8-b1a3-4885-88f6-bc83ef03f454" containerName="collect-profiles" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.766910 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="43165ab8-b1a3-4885-88f6-bc83ef03f454" containerName="collect-profiles" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.767020 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="43165ab8-b1a3-4885-88f6-bc83ef03f454" containerName="collect-profiles" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.767823 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.773291 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z4tw2"] Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.779869 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.850811 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlm98\" (UniqueName: \"kubernetes.io/projected/43165ab8-b1a3-4885-88f6-bc83ef03f454-kube-api-access-rlm98\") pod \"43165ab8-b1a3-4885-88f6-bc83ef03f454\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.850883 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/43165ab8-b1a3-4885-88f6-bc83ef03f454-secret-volume\") pod \"43165ab8-b1a3-4885-88f6-bc83ef03f454\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.850949 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume\") pod \"43165ab8-b1a3-4885-88f6-bc83ef03f454\" (UID: \"43165ab8-b1a3-4885-88f6-bc83ef03f454\") " Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.851177 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-utilities\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.851244 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-catalog-content\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.851616 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2wq7\" (UniqueName: \"kubernetes.io/projected/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-kube-api-access-n2wq7\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.852713 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume" (OuterVolumeSpecName: "config-volume") pod "43165ab8-b1a3-4885-88f6-bc83ef03f454" (UID: "43165ab8-b1a3-4885-88f6-bc83ef03f454"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.861221 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43165ab8-b1a3-4885-88f6-bc83ef03f454-kube-api-access-rlm98" (OuterVolumeSpecName: "kube-api-access-rlm98") pod "43165ab8-b1a3-4885-88f6-bc83ef03f454" (UID: "43165ab8-b1a3-4885-88f6-bc83ef03f454"). 
InnerVolumeSpecName "kube-api-access-rlm98". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.861393 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43165ab8-b1a3-4885-88f6-bc83ef03f454-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "43165ab8-b1a3-4885-88f6-bc83ef03f454" (UID: "43165ab8-b1a3-4885-88f6-bc83ef03f454"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.952991 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-catalog-content\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953032 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2wq7\" (UniqueName: \"kubernetes.io/projected/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-kube-api-access-n2wq7\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953107 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-utilities\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953140 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/43165ab8-b1a3-4885-88f6-bc83ef03f454-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953151 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlm98\" (UniqueName: \"kubernetes.io/projected/43165ab8-b1a3-4885-88f6-bc83ef03f454-kube-api-access-rlm98\") on node \"crc\" DevicePath \"\"" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953161 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/43165ab8-b1a3-4885-88f6-bc83ef03f454-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953561 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-utilities\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.953762 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-catalog-content\") pod \"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.970336 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2wq7\" (UniqueName: \"kubernetes.io/projected/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-kube-api-access-n2wq7\") pod 
\"redhat-operators-z4tw2\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:53 crc kubenswrapper[4995]: I0120 16:33:53.995406 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.037742 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s79"] Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.089413 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.114244 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5944b"] Jan 20 16:33:54 crc kubenswrapper[4995]: W0120 16:33:54.126955 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e121cf7_8b43_4ac2_8f76_9376b6e97ccf.slice/crio-2d4e21a15a77791b35cbdd9b837551f83b2fd81efd23f054567b19dc67c26a65 WatchSource:0}: Error finding container 2d4e21a15a77791b35cbdd9b837551f83b2fd81efd23f054567b19dc67c26a65: Status 404 returned error can't find the container with id 2d4e21a15a77791b35cbdd9b837551f83b2fd81efd23f054567b19dc67c26a65 Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.144173 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-twpdq"] Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.145068 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.153951 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-twpdq"] Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.256368 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gtvf\" (UniqueName: \"kubernetes.io/projected/45b2120d-74d7-4b92-90c6-18b7bbe7375e-kube-api-access-5gtvf\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.256769 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-catalog-content\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.256801 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-utilities\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.323329 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-92c94" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.359613 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-5gtvf\" (UniqueName: \"kubernetes.io/projected/45b2120d-74d7-4b92-90c6-18b7bbe7375e-kube-api-access-5gtvf\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.359677 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-catalog-content\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.359709 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-utilities\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.360204 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-utilities\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.360405 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-catalog-content\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.381909 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gtvf\" (UniqueName: \"kubernetes.io/projected/45b2120d-74d7-4b92-90c6-18b7bbe7375e-kube-api-access-5gtvf\") pod \"redhat-operators-twpdq\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.462532 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.463018 4995 generic.go:334] "Generic (PLEG): container finished" podID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerID="4a39e94808794f5fecd6e6c88841b51b80a41a4dca9152ac3e28eb5865f9c0fb" exitCode=0 Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.463109 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq48v" event={"ID":"85820e5b-3be3-43a1-a954-7ce3719e24b5","Type":"ContainerDied","Data":"4a39e94808794f5fecd6e6c88841b51b80a41a4dca9152ac3e28eb5865f9c0fb"} Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.467000 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" event={"ID":"43165ab8-b1a3-4885-88f6-bc83ef03f454","Type":"ContainerDied","Data":"fc33b36f0507816deb12e603bb0a954e6793c5924a0dd7d01460467335d5a132"} Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.467044 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc33b36f0507816deb12e603bb0a954e6793c5924a0dd7d01460467335d5a132" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.467058 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.468714 4995 generic.go:334] "Generic (PLEG): container finished" podID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerID="5333c9e48b15a0c2a42dfb3199feab7e7aa721556f72c2c30bc6f04e5e742e06" exitCode=0 Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.468768 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s79" event={"ID":"58893b4f-0622-48ee-bc1d-24ed2b499606","Type":"ContainerDied","Data":"5333c9e48b15a0c2a42dfb3199feab7e7aa721556f72c2c30bc6f04e5e742e06"} Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.468790 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s79" event={"ID":"58893b4f-0622-48ee-bc1d-24ed2b499606","Type":"ContainerStarted","Data":"7b7e2788485e95537778c6e07cedc9b4279ff4ba9a3b70d58b6eef3f958084fa"} Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.477812 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" event={"ID":"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf","Type":"ContainerStarted","Data":"3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350"} Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.477870 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" event={"ID":"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf","Type":"ContainerStarted","Data":"2d4e21a15a77791b35cbdd9b837551f83b2fd81efd23f054567b19dc67c26a65"} Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.495083 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-z4tw2"] Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.501512 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" podStartSLOduration=133.501486088 podStartE2EDuration="2m13.501486088s" podCreationTimestamp="2026-01-20 16:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:33:54.501246061 +0000 UTC m=+152.745850867" watchObservedRunningTime="2026-01-20 16:33:54.501486088 +0000 UTC m=+152.746090894" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.538709 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:54 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:54 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:54 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.538766 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:54 crc kubenswrapper[4995]: I0120 16:33:54.825914 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-twpdq"] Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.488060 4995 generic.go:334] "Generic (PLEG): container finished" podID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerID="592bcc41ec7013de722f599526a52bc4ee19f36671744ae6b10b2ee8dbf7d5f2" exitCode=0 Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.488119 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4tw2" event={"ID":"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0","Type":"ContainerDied","Data":"592bcc41ec7013de722f599526a52bc4ee19f36671744ae6b10b2ee8dbf7d5f2"} Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.488159 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4tw2" event={"ID":"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0","Type":"ContainerStarted","Data":"ea848ff5a0b74dcf4478bf439d09714f7e5755476db3dd18efd629b975b95e68"} Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.496494 4995 generic.go:334] "Generic (PLEG): container finished" podID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerID="fb898b142ea965b32189786ce0029b743aca5ac0c25057d12f7f63ac010f6bdf" exitCode=0 Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.496534 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-twpdq" event={"ID":"45b2120d-74d7-4b92-90c6-18b7bbe7375e","Type":"ContainerDied","Data":"fb898b142ea965b32189786ce0029b743aca5ac0c25057d12f7f63ac010f6bdf"} Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.496584 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-twpdq" event={"ID":"45b2120d-74d7-4b92-90c6-18b7bbe7375e","Type":"ContainerStarted","Data":"c836fe7568026cb8af4baf60858ec3e2a3b54fec325504be90d3502bab5e28a4"} Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.496767 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.539919 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:55 crc kubenswrapper[4995]: 
[-]has-synced failed: reason withheld Jan 20 16:33:55 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:55 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:55 crc kubenswrapper[4995]: I0120 16:33:55.539983 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.009289 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.009577 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.011431 4995 patch_prober.go:28] interesting pod/console-f9d7485db-jzs8c container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body= Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.011469 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jzs8c" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.536596 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-9m9xl" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.538996 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:56 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:56 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:56 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.539051 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.665709 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.666662 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.676135 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.731964 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.733734 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.733833 4995 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.735731 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.735735 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.848437 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.848481 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.855713 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.898739 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:56 crc kubenswrapper[4995]: I0120 16:33:56.898794 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:56.999645 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:56.999727 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.000865 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.028106 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 
16:33:57.057619 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.511598 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-6wxvh" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.512650 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5mmhx" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.541801 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 16:33:57 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld Jan 20 16:33:57 crc kubenswrapper[4995]: [+]process-running ok Jan 20 16:33:57 crc kubenswrapper[4995]: healthz check failed Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.541844 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.983441 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.984226 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.987276 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.987373 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 20 16:33:57 crc kubenswrapper[4995]: I0120 16:33:57.988374 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.117595 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.117639 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.218784 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.218916 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.219162 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.239671 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.314456 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.539902 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:33:58 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:33:58 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:33:58 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:33:58 crc kubenswrapper[4995]: I0120 16:33:58.539958 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:33:59 crc kubenswrapper[4995]: I0120 16:33:59.538194 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:33:59 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:33:59 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:33:59 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:33:59 crc kubenswrapper[4995]: I0120 16:33:59.538294 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:00 crc kubenswrapper[4995]: I0120 16:34:00.538093 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:34:00 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:34:00 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:34:00 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:34:00 crc kubenswrapper[4995]: I0120 16:34:00.538168 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:00 crc kubenswrapper[4995]: I0120 16:34:00.571810 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 16:34:00 crc kubenswrapper[4995]: I0120 16:34:00.571864 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 16:34:01 crc kubenswrapper[4995]: I0120 16:34:01.538532 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:34:01 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:34:01 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:34:01 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:34:01 crc kubenswrapper[4995]: I0120 16:34:01.538881 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:01 crc kubenswrapper[4995]: I0120 16:34:01.902456 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-gdlqb"
Jan 20 16:34:02 crc kubenswrapper[4995]: I0120 16:34:02.540322 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:34:02 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:34:02 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:34:02 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:34:02 crc kubenswrapper[4995]: I0120 16:34:02.540387 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:02 crc kubenswrapper[4995]: I0120 16:34:02.895067 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:34:02 crc kubenswrapper[4995]: I0120 16:34:02.899680 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9dfc8bb5-28e8-4ba3-8009-09d5585a1a12-metrics-certs\") pod \"network-metrics-daemon-kbdtf\" (UID: \"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12\") " pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:34:02 crc kubenswrapper[4995]: I0120 16:34:02.915674 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kbdtf"
Jan 20 16:34:03 crc kubenswrapper[4995]: I0120 16:34:03.541651 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:34:03 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:34:03 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:34:03 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:34:03 crc kubenswrapper[4995]: I0120 16:34:03.541752 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:04 crc kubenswrapper[4995]: I0120 16:34:04.540617 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:34:04 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:34:04 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:34:04 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:34:04 crc kubenswrapper[4995]: I0120 16:34:04.541002 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:05 crc kubenswrapper[4995]: I0120 16:34:05.538460 4995 patch_prober.go:28] interesting pod/router-default-5444994796-9m9xl container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 16:34:05 crc kubenswrapper[4995]: [-]has-synced failed: reason withheld
Jan 20 16:34:05 crc kubenswrapper[4995]: [+]process-running ok
Jan 20 16:34:05 crc kubenswrapper[4995]: healthz check failed
Jan 20 16:34:05 crc kubenswrapper[4995]: I0120 16:34:05.538589 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-9m9xl" podUID="9ccd2a3c-8848-42cb-96fa-a9df5a60a729" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 16:34:06 crc kubenswrapper[4995]: I0120 16:34:06.009926 4995 patch_prober.go:28] interesting pod/console-f9d7485db-jzs8c container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Jan 20 16:34:06 crc kubenswrapper[4995]: I0120 16:34:06.009983 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-jzs8c" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused"
Jan 20 16:34:06 crc kubenswrapper[4995]: I0120 16:34:06.552306 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-9m9xl"
Jan 20 16:34:06 crc kubenswrapper[4995]: I0120 16:34:06.559519 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-9m9xl"
Jan 20 16:34:10 crc kubenswrapper[4995]: I0120 16:34:10.478888 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-prbmb"]
Jan 20 16:34:10 crc kubenswrapper[4995]: I0120 16:34:10.480243 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerName="controller-manager" containerID="cri-o://a9465b4d4875f95c86ee702c174e411ad32f1c2e34d27f9be90c9d984680d589" gracePeriod=30
Jan 20 16:34:10 crc kubenswrapper[4995]: I0120 16:34:10.493759 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"]
Jan 20 16:34:10 crc kubenswrapper[4995]: I0120 16:34:10.495070 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" containerName="route-controller-manager" containerID="cri-o://d93c17ca4aed91a097a31acb389998fa5ef17daa12c846eb0284383d8c716e42" gracePeriod=30
Jan 20 16:34:11 crc kubenswrapper[4995]: I0120 16:34:11.594109 4995 generic.go:334] "Generic (PLEG): container finished" podID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerID="a9465b4d4875f95c86ee702c174e411ad32f1c2e34d27f9be90c9d984680d589" exitCode=0
Jan 20 16:34:11 crc kubenswrapper[4995]: I0120 16:34:11.594165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" event={"ID":"c8c61473-94e6-460c-a307-1b2f727a24ed","Type":"ContainerDied","Data":"a9465b4d4875f95c86ee702c174e411ad32f1c2e34d27f9be90c9d984680d589"}
Jan 20 16:34:11 crc kubenswrapper[4995]: I0120 16:34:11.595532 4995 generic.go:334] "Generic (PLEG): container finished" podID="a0d62048-3df5-4737-adc3-5544e1402f06" containerID="d93c17ca4aed91a097a31acb389998fa5ef17daa12c846eb0284383d8c716e42" exitCode=0
Jan 20 16:34:11 crc kubenswrapper[4995]: I0120 16:34:11.595558 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" event={"ID":"a0d62048-3df5-4737-adc3-5544e1402f06","Type":"ContainerDied","Data":"d93c17ca4aed91a097a31acb389998fa5ef17daa12c846eb0284383d8c716e42"}
Jan 20 16:34:13 crc kubenswrapper[4995]: I0120 16:34:13.590034 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-5944b"
Jan 20 16:34:16 crc kubenswrapper[4995]: I0120 16:34:16.014822 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-jzs8c"
Jan 20 16:34:16 crc kubenswrapper[4995]: I0120 16:34:16.022753 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-jzs8c"
Jan 20 16:34:17 crc kubenswrapper[4995]: I0120 16:34:17.206366 4995 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-prbmb container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 20 16:34:17 crc kubenswrapper[4995]: I0120 16:34:17.206749 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 20 16:34:17 crc kubenswrapper[4995]: I0120 16:34:17.522337 4995 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-67sf4 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 20 16:34:17 crc kubenswrapper[4995]: I0120 16:34:17.522995 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 20 16:34:26 crc kubenswrapper[4995]: I0120 16:34:26.260926 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-wxtx6"
Jan 20 16:34:27 crc kubenswrapper[4995]: I0120 16:34:27.206491 4995 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-prbmb container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 20 16:34:27 crc kubenswrapper[4995]: I0120 16:34:27.206567 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.33:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 20 16:34:27 crc kubenswrapper[4995]: I0120 16:34:27.522249 4995 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-67sf4 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 20 16:34:27 crc kubenswrapper[4995]: I0120 16:34:27.522334 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 20 16:34:28 crc kubenswrapper[4995]: I0120 16:34:28.302643 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 16:34:30 crc kubenswrapper[4995]: I0120 16:34:30.571550 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 16:34:30 crc kubenswrapper[4995]: I0120 16:34:30.571625 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.212473 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.213914 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.219809 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.339505 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd88881d-ef13-4436-bcb4-0abad299bd29-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.339891 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cd88881d-ef13-4436-bcb4-0abad299bd29-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.440185 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd88881d-ef13-4436-bcb4-0abad299bd29-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.440237 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cd88881d-ef13-4436-bcb4-0abad299bd29-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.440328 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cd88881d-ef13-4436-bcb4-0abad299bd29-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.458581 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd88881d-ef13-4436-bcb4-0abad299bd29-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:33 crc kubenswrapper[4995]: I0120 16:34:33.551285 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.180314 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.186847 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.188217 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.188350 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zcjdk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-w9bjs_openshift-marketplace(7087f2d6-d879-419d-bd93-538d617dcc91): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.190803 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-w9bjs" podUID="7087f2d6-d879-419d-bd93-538d617dcc91"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.213457 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"]
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.213814 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" containerName="route-controller-manager"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.213857 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" containerName="route-controller-manager"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.213886 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerName="controller-manager"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.213895 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerName="controller-manager"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.214209 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" containerName="route-controller-manager"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.214232 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" containerName="controller-manager"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.214917 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.264937 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.265095 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-22dg9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-brxnm_openshift-marketplace(1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.265202 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.265396 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9t9wt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-nmxgc_openshift-marketplace(b57345df-b284-4e63-b77d-f60534099876): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.266169 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-brxnm" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c"
Jan 20 16:34:34 crc kubenswrapper[4995]: E0120 16:34:34.266550 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-nmxgc" podUID="b57345df-b284-4e63-b77d-f60534099876"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.269768 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"]
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.363936 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-config\") pod \"a0d62048-3df5-4737-adc3-5544e1402f06\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364001 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-client-ca\") pod \"c8c61473-94e6-460c-a307-1b2f727a24ed\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364043 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-proxy-ca-bundles\") pod \"c8c61473-94e6-460c-a307-1b2f727a24ed\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364098 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca\") pod \"a0d62048-3df5-4737-adc3-5544e1402f06\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364122 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8c61473-94e6-460c-a307-1b2f727a24ed-serving-cert\") pod \"c8c61473-94e6-460c-a307-1b2f727a24ed\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364160 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9czh\" (UniqueName: \"kubernetes.io/projected/c8c61473-94e6-460c-a307-1b2f727a24ed-kube-api-access-g9czh\") pod \"c8c61473-94e6-460c-a307-1b2f727a24ed\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364183 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-config\") pod \"c8c61473-94e6-460c-a307-1b2f727a24ed\" (UID: \"c8c61473-94e6-460c-a307-1b2f727a24ed\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364215 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0d62048-3df5-4737-adc3-5544e1402f06-serving-cert\") pod \"a0d62048-3df5-4737-adc3-5544e1402f06\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364242 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkqcq\" (UniqueName: \"kubernetes.io/projected/a0d62048-3df5-4737-adc3-5544e1402f06-kube-api-access-vkqcq\") pod \"a0d62048-3df5-4737-adc3-5544e1402f06\" (UID: \"a0d62048-3df5-4737-adc3-5544e1402f06\") "
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364491 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jqnl\" (UniqueName: \"kubernetes.io/projected/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-kube-api-access-9jqnl\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364543 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-serving-cert\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364586 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-config\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364627 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-client-ca\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364835 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-client-ca" (OuterVolumeSpecName: "client-ca") pod "c8c61473-94e6-460c-a307-1b2f727a24ed" (UID: "c8c61473-94e6-460c-a307-1b2f727a24ed"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.364840 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-config" (OuterVolumeSpecName: "config") pod "a0d62048-3df5-4737-adc3-5544e1402f06" (UID: "a0d62048-3df5-4737-adc3-5544e1402f06"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.365129 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c8c61473-94e6-460c-a307-1b2f727a24ed" (UID: "c8c61473-94e6-460c-a307-1b2f727a24ed"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.365950 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca" (OuterVolumeSpecName: "client-ca") pod "a0d62048-3df5-4737-adc3-5544e1402f06" (UID: "a0d62048-3df5-4737-adc3-5544e1402f06"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.366521 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-config" (OuterVolumeSpecName: "config") pod "c8c61473-94e6-460c-a307-1b2f727a24ed" (UID: "c8c61473-94e6-460c-a307-1b2f727a24ed"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.369790 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0d62048-3df5-4737-adc3-5544e1402f06-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a0d62048-3df5-4737-adc3-5544e1402f06" (UID: "a0d62048-3df5-4737-adc3-5544e1402f06"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.372328 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8c61473-94e6-460c-a307-1b2f727a24ed-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c8c61473-94e6-460c-a307-1b2f727a24ed" (UID: "c8c61473-94e6-460c-a307-1b2f727a24ed"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.378472 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0d62048-3df5-4737-adc3-5544e1402f06-kube-api-access-vkqcq" (OuterVolumeSpecName: "kube-api-access-vkqcq") pod "a0d62048-3df5-4737-adc3-5544e1402f06" (UID: "a0d62048-3df5-4737-adc3-5544e1402f06"). InnerVolumeSpecName "kube-api-access-vkqcq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.379048 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8c61473-94e6-460c-a307-1b2f727a24ed-kube-api-access-g9czh" (OuterVolumeSpecName: "kube-api-access-g9czh") pod "c8c61473-94e6-460c-a307-1b2f727a24ed" (UID: "c8c61473-94e6-460c-a307-1b2f727a24ed"). InnerVolumeSpecName "kube-api-access-g9czh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466062 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-config\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466138 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-client-ca\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466180 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jqnl\" (UniqueName: \"kubernetes.io/projected/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-kube-api-access-9jqnl\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466208 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-serving-cert\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466245 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-client-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466256 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466267 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-client-ca\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466275 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c8c61473-94e6-460c-a307-1b2f727a24ed-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466285 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9czh\" (UniqueName: \"kubernetes.io/projected/c8c61473-94e6-460c-a307-1b2f727a24ed-kube-api-access-g9czh\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466293 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8c61473-94e6-460c-a307-1b2f727a24ed-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466302 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a0d62048-3df5-4737-adc3-5544e1402f06-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466310 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkqcq\" (UniqueName: \"kubernetes.io/projected/a0d62048-3df5-4737-adc3-5544e1402f06-kube-api-access-vkqcq\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.466319 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a0d62048-3df5-4737-adc3-5544e1402f06-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.467499 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-client-ca\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.469562 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-config\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.470544 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-serving-cert\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.482799 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jqnl\" (UniqueName: \"kubernetes.io/projected/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-kube-api-access-9jqnl\") pod \"route-controller-manager-65bdd9b95-fnnqf\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.535585 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.548136 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.720980 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb" event={"ID":"c8c61473-94e6-460c-a307-1b2f727a24ed","Type":"ContainerDied","Data":"20ffa9ca3e34f3246b2befe6dad017972a266921c3af9cf00a3a8cecd5f4608c"}
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.721040 4995 scope.go:117] "RemoveContainer" containerID="a9465b4d4875f95c86ee702c174e411ad32f1c2e34d27f9be90c9d984680d589"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.721233 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-prbmb"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.724621 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4" event={"ID":"a0d62048-3df5-4737-adc3-5544e1402f06","Type":"ContainerDied","Data":"ec953025fceb01c7f8b1cd81be63b574a21f9e8fb7496c8e64f02e63241d30aa"}
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.724728 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.755105 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-prbmb"]
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.757458 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-prbmb"]
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.782498 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"]
Jan 20 16:34:34 crc kubenswrapper[4995]: I0120 16:34:34.786122 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-67sf4"]
Jan 20 16:34:35 crc kubenswrapper[4995]: I0120 16:34:35.995585 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0d62048-3df5-4737-adc3-5544e1402f06" path="/var/lib/kubelet/pods/a0d62048-3df5-4737-adc3-5544e1402f06/volumes"
Jan 20 16:34:35 crc kubenswrapper[4995]: I0120 16:34:35.996560 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8c61473-94e6-460c-a307-1b2f727a24ed" path="/var/lib/kubelet/pods/c8c61473-94e6-460c-a307-1b2f727a24ed/volumes"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.111349 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-645998df4b-bdtk6"]
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.112297 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.117341 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.117446 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.118140 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.118257 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.128551 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.129784 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.133515 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.137119 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-645998df4b-bdtk6"]
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.301462 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghk76\" (UniqueName: \"kubernetes.io/projected/59301323-d997-4af1-b2b0-4e8f0e7356f4-kube-api-access-ghk76\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.301506 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-config\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.301560 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59301323-d997-4af1-b2b0-4e8f0e7356f4-serving-cert\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.301589 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-proxy-ca-bundles\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.301621 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-client-ca\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.402769 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59301323-d997-4af1-b2b0-4e8f0e7356f4-serving-cert\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.402823 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-proxy-ca-bundles\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.402860 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-client-ca\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.402888 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghk76\" (UniqueName: \"kubernetes.io/projected/59301323-d997-4af1-b2b0-4e8f0e7356f4-kube-api-access-ghk76\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.402907 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-config\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.404971 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-config\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.405842 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-proxy-ca-bundles\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.407755 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-client-ca\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.412722 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59301323-d997-4af1-b2b0-4e8f0e7356f4-serving-cert\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.431642 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghk76\" (UniqueName: \"kubernetes.io/projected/59301323-d997-4af1-b2b0-4e8f0e7356f4-kube-api-access-ghk76\") pod \"controller-manager-645998df4b-bdtk6\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: I0120 16:34:37.445612 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.515790 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-brxnm" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.515799 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-nmxgc" podUID="b57345df-b284-4e63-b77d-f60534099876"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.515899 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-w9bjs" podUID="7087f2d6-d879-419d-bd93-538d617dcc91"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.545687 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.546117 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5gtvf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-twpdq_openshift-marketplace(45b2120d-74d7-4b92-90c6-18b7bbe7375e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.547565 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-twpdq" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.575866 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.576009 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n2wq7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-z4tw2_openshift-marketplace(affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 20 16:34:37 crc kubenswrapper[4995]: E0120 16:34:37.577358 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-z4tw2" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.180552 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.181653 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.204180 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.212815 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kubelet-dir\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.212853 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-var-lock\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.212911 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kube-api-access\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.314165 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kubelet-dir\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.314226 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-var-lock\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.314281 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kube-api-access\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.314288 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kubelet-dir\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.314382 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-var-lock\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.330431 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kube-api-access\") pod \"installer-9-crc\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:38 crc kubenswrapper[4995]: I0120 16:34:38.522287 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.136602 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod0fce2f43_2741_49ce_88e7_58ef64ff24c7.slice/crio-e95b09962b00c727a0d6f886edaeaf47cab070cfe07f090f1f5d188a904f3f08 WatchSource:0}: Error finding container e95b09962b00c727a0d6f886edaeaf47cab070cfe07f090f1f5d188a904f3f08: Status 404 returned error can't find the container with id e95b09962b00c727a0d6f886edaeaf47cab070cfe07f090f1f5d188a904f3f08
Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.151941 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-twpdq" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e"
Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.151941 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-z4tw2" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0"
Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.152137 4995 scope.go:117] "RemoveContainer" containerID="d93c17ca4aed91a097a31acb389998fa5ef17daa12c846eb0284383d8c716e42"
Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.228198 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.228569 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qxmqb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-46s79_openshift-marketplace(58893b4f-0622-48ee-bc1d-24ed2b499606): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.230808 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-46s79" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.259221 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.259354 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4p86s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bq48v_openshift-marketplace(85820e5b-3be3-43a1-a954-7ce3719e24b5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.263179 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-bq48v" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.548940 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.558926 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6915ccd2_d29a_4536_9a92_443f2c21c1c0.slice/crio-408c20fa5b6b6ed5a9330c984d684c8e16168256d2622fab7316d26593eecc61 WatchSource:0}: Error finding container 408c20fa5b6b6ed5a9330c984d684c8e16168256d2622fab7316d26593eecc61: Status 404 returned error can't find the container with id 408c20fa5b6b6ed5a9330c984d684c8e16168256d2622fab7316d26593eecc61 Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.610275 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.619071 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podcd88881d_ef13_4436_bcb4_0abad299bd29.slice/crio-a4b2753522ef37ee2f1a8194ce89852979990cac1f0e31d34ef8b8f7e8b0273d WatchSource:0}: Error finding container a4b2753522ef37ee2f1a8194ce89852979990cac1f0e31d34ef8b8f7e8b0273d: Status 404 returned error can't find the container with id a4b2753522ef37ee2f1a8194ce89852979990cac1f0e31d34ef8b8f7e8b0273d Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.695555 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"] Jan 20 16:34:39 crc 
kubenswrapper[4995]: I0120 16:34:39.699343 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-kbdtf"] Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.700617 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9dfc8bb5_28e8_4ba3_8009_09d5585a1a12.slice/crio-5bb98ae7e07a45634146a12e075337a9c032b4f5ef282325a0f916a9c697768e WatchSource:0}: Error finding container 5bb98ae7e07a45634146a12e075337a9c032b4f5ef282325a0f916a9c697768e: Status 404 returned error can't find the container with id 5bb98ae7e07a45634146a12e075337a9c032b4f5ef282325a0f916a9c697768e Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.703275 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d16567b_c923_4e03_9c9b_741c6aa9f1b8.slice/crio-83c083eb9e948215570ce89a5d4e1584034fb0d6023c08966ee18a6f9d4bbb75 WatchSource:0}: Error finding container 83c083eb9e948215570ce89a5d4e1584034fb0d6023c08966ee18a6f9d4bbb75: Status 404 returned error can't find the container with id 83c083eb9e948215570ce89a5d4e1584034fb0d6023c08966ee18a6f9d4bbb75 Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.741174 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-645998df4b-bdtk6"] Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.754495 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.755498 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59301323_d997_4af1_b2b0_4e8f0e7356f4.slice/crio-6a7c5453f95f26b22df62567787753e71709450eae2dc7ca99ceb54ff55160db WatchSource:0}: Error finding container 6a7c5453f95f26b22df62567787753e71709450eae2dc7ca99ceb54ff55160db: Status 404 returned error can't find the container with id 6a7c5453f95f26b22df62567787753e71709450eae2dc7ca99ceb54ff55160db Jan 20 16:34:39 crc kubenswrapper[4995]: W0120 16:34:39.757204 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod758090f5_45ed_44f6_9fdc_0af9eac7d6ea.slice/crio-74667134e68478dbfc8ebf12d28f24c124fa99e82ecc176c173078bc77af398f WatchSource:0}: Error finding container 74667134e68478dbfc8ebf12d28f24c124fa99e82ecc176c173078bc77af398f: Status 404 returned error can't find the container with id 74667134e68478dbfc8ebf12d28f24c124fa99e82ecc176c173078bc77af398f Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.759565 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0fce2f43-2741-49ce-88e7-58ef64ff24c7","Type":"ContainerStarted","Data":"e95b09962b00c727a0d6f886edaeaf47cab070cfe07f090f1f5d188a904f3f08"} Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.760902 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6915ccd2-d29a-4536-9a92-443f2c21c1c0","Type":"ContainerStarted","Data":"408c20fa5b6b6ed5a9330c984d684c8e16168256d2622fab7316d26593eecc61"} Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.764492 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" 
event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerStarted","Data":"96c9b9241763ecce10b957e6f626525f611fc489f318843643efcc4213655b3f"} Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.767830 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" event={"ID":"2d16567b-c923-4e03-9c9b-741c6aa9f1b8","Type":"ContainerStarted","Data":"83c083eb9e948215570ce89a5d4e1584034fb0d6023c08966ee18a6f9d4bbb75"} Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.770361 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" event={"ID":"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12","Type":"ContainerStarted","Data":"5bb98ae7e07a45634146a12e075337a9c032b4f5ef282325a0f916a9c697768e"} Jan 20 16:34:39 crc kubenswrapper[4995]: I0120 16:34:39.773526 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cd88881d-ef13-4436-bcb4-0abad299bd29","Type":"ContainerStarted","Data":"a4b2753522ef37ee2f1a8194ce89852979990cac1f0e31d34ef8b8f7e8b0273d"} Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.810294 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-46s79" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" Jan 20 16:34:39 crc kubenswrapper[4995]: E0120 16:34:39.810530 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-bq48v" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.781284 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"758090f5-45ed-44f6-9fdc-0af9eac7d6ea","Type":"ContainerStarted","Data":"b0f5655a3bc0a706fd14ae5f3ebc225b155bc17171c6b2019a4922130d279b61"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.781878 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"758090f5-45ed-44f6-9fdc-0af9eac7d6ea","Type":"ContainerStarted","Data":"74667134e68478dbfc8ebf12d28f24c124fa99e82ecc176c173078bc77af398f"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.782888 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" event={"ID":"2d16567b-c923-4e03-9c9b-741c6aa9f1b8","Type":"ContainerStarted","Data":"747f06597fab16f01759e752db972f8b8e4679e8088765be040cc895c99a011c"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.783111 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.785694 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" event={"ID":"59301323-d997-4af1-b2b0-4e8f0e7356f4","Type":"ContainerStarted","Data":"1713f60aad76c2d000ae5630d50a1e7b5631c5f18fa023ff3604c365f4421280"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.785730 4995 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" event={"ID":"59301323-d997-4af1-b2b0-4e8f0e7356f4","Type":"ContainerStarted","Data":"6a7c5453f95f26b22df62567787753e71709450eae2dc7ca99ceb54ff55160db"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.785894 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.787688 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" event={"ID":"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12","Type":"ContainerStarted","Data":"71f7b155a0f8b156da6b6a29e8e356a7cbf3e916ecba57592d74f1d00762355f"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.787723 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-kbdtf" event={"ID":"9dfc8bb5-28e8-4ba3-8009-09d5585a1a12","Type":"ContainerStarted","Data":"63429f9e1f6fb98407578467f1005b1a259cc32941291887a5393a00c2145c1e"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.789162 4995 generic.go:334] "Generic (PLEG): container finished" podID="cd88881d-ef13-4436-bcb4-0abad299bd29" containerID="7aa0db916323bb023ab7bc48682da37ac7047b86eb4bf6e360c1db3b1db3950a" exitCode=0 Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.789205 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cd88881d-ef13-4436-bcb4-0abad299bd29","Type":"ContainerDied","Data":"7aa0db916323bb023ab7bc48682da37ac7047b86eb4bf6e360c1db3b1db3950a"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.790041 4995 generic.go:334] "Generic (PLEG): container finished" podID="0fce2f43-2741-49ce-88e7-58ef64ff24c7" containerID="817e6c715fae469a1ebbab4871d95b23d65cbb690fb2c39d5c373b0ab8340505" exitCode=0 Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.790096 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0fce2f43-2741-49ce-88e7-58ef64ff24c7","Type":"ContainerDied","Data":"817e6c715fae469a1ebbab4871d95b23d65cbb690fb2c39d5c373b0ab8340505"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.791480 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.795530 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.795916 4995 generic.go:334] "Generic (PLEG): container finished" podID="6915ccd2-d29a-4536-9a92-443f2c21c1c0" containerID="c83ed036827d9959063f61429b236d6fbe510c39eff2a8adaf4e589b253f247f" exitCode=0 Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.795967 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6915ccd2-d29a-4536-9a92-443f2c21c1c0","Type":"ContainerDied","Data":"c83ed036827d9959063f61429b236d6fbe510c39eff2a8adaf4e589b253f247f"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.797743 4995 generic.go:334] "Generic (PLEG): container finished" podID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerID="96c9b9241763ecce10b957e6f626525f611fc489f318843643efcc4213655b3f" exitCode=0 Jan 20 
16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.797801 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerDied","Data":"96c9b9241763ecce10b957e6f626525f611fc489f318843643efcc4213655b3f"} Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.798623 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.798612753 podStartE2EDuration="2.798612753s" podCreationTimestamp="2026-01-20 16:34:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:34:40.795768503 +0000 UTC m=+199.040373309" watchObservedRunningTime="2026-01-20 16:34:40.798612753 +0000 UTC m=+199.043217559" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.836342 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" podStartSLOduration=10.83631152 podStartE2EDuration="10.83631152s" podCreationTimestamp="2026-01-20 16:34:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:34:40.836026992 +0000 UTC m=+199.080631788" watchObservedRunningTime="2026-01-20 16:34:40.83631152 +0000 UTC m=+199.080916326" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.881742 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-kbdtf" podStartSLOduration=180.881727152 podStartE2EDuration="3m0.881727152s" podCreationTimestamp="2026-01-20 16:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:34:40.859915061 +0000 UTC m=+199.104519877" watchObservedRunningTime="2026-01-20 16:34:40.881727152 +0000 UTC m=+199.126331958" Jan 20 16:34:40 crc kubenswrapper[4995]: I0120 16:34:40.884098 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" podStartSLOduration=10.884067938 podStartE2EDuration="10.884067938s" podCreationTimestamp="2026-01-20 16:34:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:34:40.883163393 +0000 UTC m=+199.127768199" watchObservedRunningTime="2026-01-20 16:34:40.884067938 +0000 UTC m=+199.128672744" Jan 20 16:34:41 crc kubenswrapper[4995]: I0120 16:34:41.806575 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerStarted","Data":"f53e94e8e40871a068f70449f96f1544a8545b565e48c8db3581aee348c5c404"} Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.098638 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.121093 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-59fd5" podStartSLOduration=4.281223691 podStartE2EDuration="52.121053296s" podCreationTimestamp="2026-01-20 16:33:50 +0000 UTC" firstStartedPulling="2026-01-20 16:33:53.442421972 +0000 UTC m=+151.687026778" lastFinishedPulling="2026-01-20 16:34:41.282251577 +0000 UTC m=+199.526856383" observedRunningTime="2026-01-20 16:34:41.824138625 +0000 UTC m=+200.068743461" watchObservedRunningTime="2026-01-20 16:34:42.121053296 +0000 UTC m=+200.365658092" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.186704 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.192374 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.260407 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kube-api-access\") pod \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.260526 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kubelet-dir\") pod \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\" (UID: \"0fce2f43-2741-49ce-88e7-58ef64ff24c7\") " Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.260790 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0fce2f43-2741-49ce-88e7-58ef64ff24c7" (UID: "0fce2f43-2741-49ce-88e7-58ef64ff24c7"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.266419 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0fce2f43-2741-49ce-88e7-58ef64ff24c7" (UID: "0fce2f43-2741-49ce-88e7-58ef64ff24c7"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362010 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kubelet-dir\") pod \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362049 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kube-api-access\") pod \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\" (UID: \"6915ccd2-d29a-4536-9a92-443f2c21c1c0\") " Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362066 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd88881d-ef13-4436-bcb4-0abad299bd29-kube-api-access\") pod \"cd88881d-ef13-4436-bcb4-0abad299bd29\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362207 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cd88881d-ef13-4436-bcb4-0abad299bd29-kubelet-dir\") pod \"cd88881d-ef13-4436-bcb4-0abad299bd29\" (UID: \"cd88881d-ef13-4436-bcb4-0abad299bd29\") " Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362388 4995 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362399 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0fce2f43-2741-49ce-88e7-58ef64ff24c7-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362435 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd88881d-ef13-4436-bcb4-0abad299bd29-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "cd88881d-ef13-4436-bcb4-0abad299bd29" (UID: "cd88881d-ef13-4436-bcb4-0abad299bd29"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.362464 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6915ccd2-d29a-4536-9a92-443f2c21c1c0" (UID: "6915ccd2-d29a-4536-9a92-443f2c21c1c0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.365899 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd88881d-ef13-4436-bcb4-0abad299bd29-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "cd88881d-ef13-4436-bcb4-0abad299bd29" (UID: "cd88881d-ef13-4436-bcb4-0abad299bd29"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.366791 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6915ccd2-d29a-4536-9a92-443f2c21c1c0" (UID: "6915ccd2-d29a-4536-9a92-443f2c21c1c0"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.464202 4995 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cd88881d-ef13-4436-bcb4-0abad299bd29-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.464241 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.464255 4995 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6915ccd2-d29a-4536-9a92-443f2c21c1c0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.464265 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cd88881d-ef13-4436-bcb4-0abad299bd29-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.814100 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.814328 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"0fce2f43-2741-49ce-88e7-58ef64ff24c7","Type":"ContainerDied","Data":"e95b09962b00c727a0d6f886edaeaf47cab070cfe07f090f1f5d188a904f3f08"} Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.814376 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e95b09962b00c727a0d6f886edaeaf47cab070cfe07f090f1f5d188a904f3f08" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.815787 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.816337 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6915ccd2-d29a-4536-9a92-443f2c21c1c0","Type":"ContainerDied","Data":"408c20fa5b6b6ed5a9330c984d684c8e16168256d2622fab7316d26593eecc61"} Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.816382 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="408c20fa5b6b6ed5a9330c984d684c8e16168256d2622fab7316d26593eecc61" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.817595 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cd88881d-ef13-4436-bcb4-0abad299bd29","Type":"ContainerDied","Data":"a4b2753522ef37ee2f1a8194ce89852979990cac1f0e31d34ef8b8f7e8b0273d"} Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.817629 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4b2753522ef37ee2f1a8194ce89852979990cac1f0e31d34ef8b8f7e8b0273d" Jan 20 16:34:42 crc kubenswrapper[4995]: I0120 16:34:42.817649 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.508005 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-645998df4b-bdtk6"] Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.508674 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" podUID="59301323-d997-4af1-b2b0-4e8f0e7356f4" containerName="controller-manager" containerID="cri-o://1713f60aad76c2d000ae5630d50a1e7b5631c5f18fa023ff3604c365f4421280" gracePeriod=30 Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.521922 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"] Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.522414 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" podUID="2d16567b-c923-4e03-9c9b-741c6aa9f1b8" containerName="route-controller-manager" containerID="cri-o://747f06597fab16f01759e752db972f8b8e4679e8088765be040cc895c99a011c" gracePeriod=30 Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.857658 4995 generic.go:334] "Generic (PLEG): container finished" podID="59301323-d997-4af1-b2b0-4e8f0e7356f4" containerID="1713f60aad76c2d000ae5630d50a1e7b5631c5f18fa023ff3604c365f4421280" exitCode=0 Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.857771 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" event={"ID":"59301323-d997-4af1-b2b0-4e8f0e7356f4","Type":"ContainerDied","Data":"1713f60aad76c2d000ae5630d50a1e7b5631c5f18fa023ff3604c365f4421280"} Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.859992 4995 generic.go:334] "Generic (PLEG): container finished" podID="2d16567b-c923-4e03-9c9b-741c6aa9f1b8" containerID="747f06597fab16f01759e752db972f8b8e4679e8088765be040cc895c99a011c" exitCode=0 Jan 20 16:34:50 crc kubenswrapper[4995]: I0120 16:34:50.860043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" event={"ID":"2d16567b-c923-4e03-9c9b-741c6aa9f1b8","Type":"ContainerDied","Data":"747f06597fab16f01759e752db972f8b8e4679e8088765be040cc895c99a011c"} Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.104034 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.113355 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272055 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-serving-cert\") pod \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272161 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-config\") pod \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272255 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghk76\" (UniqueName: \"kubernetes.io/projected/59301323-d997-4af1-b2b0-4e8f0e7356f4-kube-api-access-ghk76\") pod \"59301323-d997-4af1-b2b0-4e8f0e7356f4\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272283 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-client-ca\") pod \"59301323-d997-4af1-b2b0-4e8f0e7356f4\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272304 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-proxy-ca-bundles\") pod \"59301323-d997-4af1-b2b0-4e8f0e7356f4\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272331 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-client-ca\") pod \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272354 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-config\") pod \"59301323-d997-4af1-b2b0-4e8f0e7356f4\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272386 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jqnl\" (UniqueName: \"kubernetes.io/projected/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-kube-api-access-9jqnl\") pod \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\" (UID: \"2d16567b-c923-4e03-9c9b-741c6aa9f1b8\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.272420 4995 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59301323-d997-4af1-b2b0-4e8f0e7356f4-serving-cert\") pod \"59301323-d997-4af1-b2b0-4e8f0e7356f4\" (UID: \"59301323-d997-4af1-b2b0-4e8f0e7356f4\") " Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.273004 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-client-ca" (OuterVolumeSpecName: "client-ca") pod "59301323-d997-4af1-b2b0-4e8f0e7356f4" (UID: "59301323-d997-4af1-b2b0-4e8f0e7356f4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.273059 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "59301323-d997-4af1-b2b0-4e8f0e7356f4" (UID: "59301323-d997-4af1-b2b0-4e8f0e7356f4"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.273555 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-config" (OuterVolumeSpecName: "config") pod "59301323-d997-4af1-b2b0-4e8f0e7356f4" (UID: "59301323-d997-4af1-b2b0-4e8f0e7356f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.273590 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-client-ca" (OuterVolumeSpecName: "client-ca") pod "2d16567b-c923-4e03-9c9b-741c6aa9f1b8" (UID: "2d16567b-c923-4e03-9c9b-741c6aa9f1b8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.273625 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-config" (OuterVolumeSpecName: "config") pod "2d16567b-c923-4e03-9c9b-741c6aa9f1b8" (UID: "2d16567b-c923-4e03-9c9b-741c6aa9f1b8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.279016 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59301323-d997-4af1-b2b0-4e8f0e7356f4-kube-api-access-ghk76" (OuterVolumeSpecName: "kube-api-access-ghk76") pod "59301323-d997-4af1-b2b0-4e8f0e7356f4" (UID: "59301323-d997-4af1-b2b0-4e8f0e7356f4"). InnerVolumeSpecName "kube-api-access-ghk76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.279032 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-kube-api-access-9jqnl" (OuterVolumeSpecName: "kube-api-access-9jqnl") pod "2d16567b-c923-4e03-9c9b-741c6aa9f1b8" (UID: "2d16567b-c923-4e03-9c9b-741c6aa9f1b8"). InnerVolumeSpecName "kube-api-access-9jqnl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.279175 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59301323-d997-4af1-b2b0-4e8f0e7356f4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "59301323-d997-4af1-b2b0-4e8f0e7356f4" (UID: "59301323-d997-4af1-b2b0-4e8f0e7356f4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.279265 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2d16567b-c923-4e03-9c9b-741c6aa9f1b8" (UID: "2d16567b-c923-4e03-9c9b-741c6aa9f1b8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.339673 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.339730 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374179 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374210 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374219 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374228 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jqnl\" (UniqueName: \"kubernetes.io/projected/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-kube-api-access-9jqnl\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374239 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/59301323-d997-4af1-b2b0-4e8f0e7356f4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374248 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374255 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d16567b-c923-4e03-9c9b-741c6aa9f1b8-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374265 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghk76\" (UniqueName: \"kubernetes.io/projected/59301323-d997-4af1-b2b0-4e8f0e7356f4-kube-api-access-ghk76\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.374274 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/59301323-d997-4af1-b2b0-4e8f0e7356f4-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.407157 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.867551 4995 generic.go:334] "Generic (PLEG): container finished" podID="7087f2d6-d879-419d-bd93-538d617dcc91" containerID="4514eb97b79ac3d97e7e923975e792a3337eef4e862008949de64ce0f8275e63" exitCode=0 Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.867617 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w9bjs" event={"ID":"7087f2d6-d879-419d-bd93-538d617dcc91","Type":"ContainerDied","Data":"4514eb97b79ac3d97e7e923975e792a3337eef4e862008949de64ce0f8275e63"} Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.879935 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" event={"ID":"2d16567b-c923-4e03-9c9b-741c6aa9f1b8","Type":"ContainerDied","Data":"83c083eb9e948215570ce89a5d4e1584034fb0d6023c08966ee18a6f9d4bbb75"} Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.879995 4995 scope.go:117] "RemoveContainer" containerID="747f06597fab16f01759e752db972f8b8e4679e8088765be040cc895c99a011c" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.880176 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.886071 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.892205 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-645998df4b-bdtk6" event={"ID":"59301323-d997-4af1-b2b0-4e8f0e7356f4","Type":"ContainerDied","Data":"6a7c5453f95f26b22df62567787753e71709450eae2dc7ca99ceb54ff55160db"} Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.918308 4995 scope.go:117] "RemoveContainer" containerID="1713f60aad76c2d000ae5630d50a1e7b5631c5f18fa023ff3604c365f4421280" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.943291 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.970936 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"] Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.976894 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65bdd9b95-fnnqf"] Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.979894 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-645998df4b-bdtk6"] Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.982053 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-645998df4b-bdtk6"] Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.997948 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d16567b-c923-4e03-9c9b-741c6aa9f1b8" 
path="/var/lib/kubelet/pods/2d16567b-c923-4e03-9c9b-741c6aa9f1b8/volumes" Jan 20 16:34:51 crc kubenswrapper[4995]: I0120 16:34:51.999113 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59301323-d997-4af1-b2b0-4e8f0e7356f4" path="/var/lib/kubelet/pods/59301323-d997-4af1-b2b0-4e8f0e7356f4/volumes" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.125711 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw"] Jan 20 16:34:52 crc kubenswrapper[4995]: E0120 16:34:52.126269 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59301323-d997-4af1-b2b0-4e8f0e7356f4" containerName="controller-manager" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.126342 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="59301323-d997-4af1-b2b0-4e8f0e7356f4" containerName="controller-manager" Jan 20 16:34:52 crc kubenswrapper[4995]: E0120 16:34:52.126412 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fce2f43-2741-49ce-88e7-58ef64ff24c7" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.126553 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fce2f43-2741-49ce-88e7-58ef64ff24c7" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: E0120 16:34:52.126643 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd88881d-ef13-4436-bcb4-0abad299bd29" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.126697 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd88881d-ef13-4436-bcb4-0abad299bd29" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: E0120 16:34:52.126833 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d16567b-c923-4e03-9c9b-741c6aa9f1b8" containerName="route-controller-manager" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.126903 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d16567b-c923-4e03-9c9b-741c6aa9f1b8" containerName="route-controller-manager" Jan 20 16:34:52 crc kubenswrapper[4995]: E0120 16:34:52.126963 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6915ccd2-d29a-4536-9a92-443f2c21c1c0" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.127307 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6915ccd2-d29a-4536-9a92-443f2c21c1c0" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.127482 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="59301323-d997-4af1-b2b0-4e8f0e7356f4" containerName="controller-manager" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.127575 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fce2f43-2741-49ce-88e7-58ef64ff24c7" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.127642 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd88881d-ef13-4436-bcb4-0abad299bd29" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.127699 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d16567b-c923-4e03-9c9b-741c6aa9f1b8" containerName="route-controller-manager" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.127760 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6915ccd2-d29a-4536-9a92-443f2c21c1c0" containerName="pruner" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.128386 4995 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.130738 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.130909 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.131210 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.132217 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.132357 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.132803 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.135751 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57598776b-7kng9"] Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.136835 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.138237 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.138831 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.139237 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.139238 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.140689 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.141006 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.141008 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.141864 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57598776b-7kng9"] Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.146427 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw"] Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.285415 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-config\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.285664 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-config\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.285742 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/336c4a64-e3a7-451d-be5a-8a78334e3014-serving-cert\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.285825 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-proxy-ca-bundles\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.285901 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jj4h\" (UniqueName: \"kubernetes.io/projected/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-kube-api-access-8jj4h\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.286094 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-client-ca\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.286149 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-client-ca\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.286198 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-serving-cert\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.286307 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-t6hx7\" (UniqueName: \"kubernetes.io/projected/336c4a64-e3a7-451d-be5a-8a78334e3014-kube-api-access-t6hx7\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.387577 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-client-ca\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.387881 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-serving-cert\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.387934 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6hx7\" (UniqueName: \"kubernetes.io/projected/336c4a64-e3a7-451d-be5a-8a78334e3014-kube-api-access-t6hx7\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.387972 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-config\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388002 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-config\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388023 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/336c4a64-e3a7-451d-be5a-8a78334e3014-serving-cert\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388055 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-proxy-ca-bundles\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388095 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jj4h\" (UniqueName: \"kubernetes.io/projected/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-kube-api-access-8jj4h\") pod 
\"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388126 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-client-ca\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388351 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-client-ca\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.388898 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-client-ca\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.390270 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-proxy-ca-bundles\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.390355 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-config\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.396666 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-config\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.407218 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-serving-cert\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.407526 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/336c4a64-e3a7-451d-be5a-8a78334e3014-serving-cert\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.420887 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jj4h\" (UniqueName: \"kubernetes.io/projected/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-kube-api-access-8jj4h\") pod \"route-controller-manager-57598776b-7kng9\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.422049 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6hx7\" (UniqueName: \"kubernetes.io/projected/336c4a64-e3a7-451d-be5a-8a78334e3014-kube-api-access-t6hx7\") pod \"controller-manager-8445f6f7cd-vxmcw\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.456133 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.464262 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.675085 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw"] Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.706057 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57598776b-7kng9"] Jan 20 16:34:52 crc kubenswrapper[4995]: W0120 16:34:52.727531 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c222fc0_ec8f_4403_9339_ee8d2da7fc5e.slice/crio-db8af03ccff4c4de8ac8ced9a00a3978ca751ef456256d06d41ef551e90564b7 WatchSource:0}: Error finding container db8af03ccff4c4de8ac8ced9a00a3978ca751ef456256d06d41ef551e90564b7: Status 404 returned error can't find the container with id db8af03ccff4c4de8ac8ced9a00a3978ca751ef456256d06d41ef551e90564b7 Jan 20 16:34:52 crc kubenswrapper[4995]: W0120 16:34:52.728041 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod336c4a64_e3a7_451d_be5a_8a78334e3014.slice/crio-1e2196ea2a75d4c5db2b1d491acb898abc6972f5a040353bed6362a751e5903c WatchSource:0}: Error finding container 1e2196ea2a75d4c5db2b1d491acb898abc6972f5a040353bed6362a751e5903c: Status 404 returned error can't find the container with id 1e2196ea2a75d4c5db2b1d491acb898abc6972f5a040353bed6362a751e5903c Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.908468 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" event={"ID":"336c4a64-e3a7-451d-be5a-8a78334e3014","Type":"ContainerStarted","Data":"1e2196ea2a75d4c5db2b1d491acb898abc6972f5a040353bed6362a751e5903c"} Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.909958 4995 generic.go:334] "Generic (PLEG): container finished" podID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerID="b3b8309757a99842bfc414b2173e7b0f8e7ce7642eaf2055d63686f5adb46aaa" exitCode=0 Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.909989 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-twpdq" 
event={"ID":"45b2120d-74d7-4b92-90c6-18b7bbe7375e","Type":"ContainerDied","Data":"b3b8309757a99842bfc414b2173e7b0f8e7ce7642eaf2055d63686f5adb46aaa"} Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.916238 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" event={"ID":"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e","Type":"ContainerStarted","Data":"db8af03ccff4c4de8ac8ced9a00a3978ca751ef456256d06d41ef551e90564b7"} Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.918393 4995 generic.go:334] "Generic (PLEG): container finished" podID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerID="6eec1cfdcd0d3816abce498bd735c715d0290ff4d427b4e01d17a7005959b07f" exitCode=0 Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.918447 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-brxnm" event={"ID":"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c","Type":"ContainerDied","Data":"6eec1cfdcd0d3816abce498bd735c715d0290ff4d427b4e01d17a7005959b07f"} Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.923403 4995 generic.go:334] "Generic (PLEG): container finished" podID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerID="f52ea2b342c8caff3972eeda0448f4608556b8476789ae6e2cfcedad8e66d64e" exitCode=0 Jan 20 16:34:52 crc kubenswrapper[4995]: I0120 16:34:52.923925 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4tw2" event={"ID":"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0","Type":"ContainerDied","Data":"f52ea2b342c8caff3972eeda0448f4608556b8476789ae6e2cfcedad8e66d64e"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.930353 4995 generic.go:334] "Generic (PLEG): container finished" podID="b57345df-b284-4e63-b77d-f60534099876" containerID="2c4fb876e9dc017f6f61c20ccc99277383c0add22a61c1bb27d698fdc039acba" exitCode=0 Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.930410 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nmxgc" event={"ID":"b57345df-b284-4e63-b77d-f60534099876","Type":"ContainerDied","Data":"2c4fb876e9dc017f6f61c20ccc99277383c0add22a61c1bb27d698fdc039acba"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.932599 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" event={"ID":"336c4a64-e3a7-451d-be5a-8a78334e3014","Type":"ContainerStarted","Data":"f7206c1c90d09c2ee5905b24af328372d6a446a7b09da7da20d54328d9f9e02b"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.933137 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.936145 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" event={"ID":"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e","Type":"ContainerStarted","Data":"37e8f8085c2c807e076588f197ebb39280f5e01518cbc52a6222631eb1075109"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.936560 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.939599 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 
16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.940062 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w9bjs" event={"ID":"7087f2d6-d879-419d-bd93-538d617dcc91","Type":"ContainerStarted","Data":"deea17510e6347a6d2081e2674db926600caaf64989ea2497ceee70d68634953"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.943012 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.943997 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-brxnm" event={"ID":"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c","Type":"ContainerStarted","Data":"01ada4fc7b6e30c38e97ae488db67e4e2074da5beb4929946ead938bdfaa6d8b"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.945923 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4tw2" event={"ID":"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0","Type":"ContainerStarted","Data":"af416724f5827a2d74a7d1ca311faff4aadf4e39b8b1bbb1bc86dd87527e0558"} Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.967599 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-brxnm" podStartSLOduration=1.96892 podStartE2EDuration="1m2.967580933s" podCreationTimestamp="2026-01-20 16:33:51 +0000 UTC" firstStartedPulling="2026-01-20 16:33:52.358177274 +0000 UTC m=+150.602782080" lastFinishedPulling="2026-01-20 16:34:53.356838207 +0000 UTC m=+211.601443013" observedRunningTime="2026-01-20 16:34:53.966092112 +0000 UTC m=+212.210696928" watchObservedRunningTime="2026-01-20 16:34:53.967580933 +0000 UTC m=+212.212185739" Jan 20 16:34:53 crc kubenswrapper[4995]: I0120 16:34:53.983818 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" podStartSLOduration=3.983798168 podStartE2EDuration="3.983798168s" podCreationTimestamp="2026-01-20 16:34:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:34:53.982961395 +0000 UTC m=+212.227566201" watchObservedRunningTime="2026-01-20 16:34:53.983798168 +0000 UTC m=+212.228402984" Jan 20 16:34:54 crc kubenswrapper[4995]: I0120 16:34:54.010522 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" podStartSLOduration=4.010495377 podStartE2EDuration="4.010495377s" podCreationTimestamp="2026-01-20 16:34:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:34:54.001532615 +0000 UTC m=+212.246137421" watchObservedRunningTime="2026-01-20 16:34:54.010495377 +0000 UTC m=+212.255100193" Jan 20 16:34:54 crc kubenswrapper[4995]: I0120 16:34:54.026274 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w9bjs" podStartSLOduration=4.652338897 podStartE2EDuration="1m4.026258628s" podCreationTimestamp="2026-01-20 16:33:50 +0000 UTC" firstStartedPulling="2026-01-20 16:33:53.42535514 +0000 UTC m=+151.669959956" lastFinishedPulling="2026-01-20 16:34:52.799274881 +0000 UTC m=+211.043879687" observedRunningTime="2026-01-20 16:34:54.025126717 +0000 UTC 
m=+212.269731513" watchObservedRunningTime="2026-01-20 16:34:54.026258628 +0000 UTC m=+212.270863434" Jan 20 16:34:54 crc kubenswrapper[4995]: I0120 16:34:54.056880 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-z4tw2" podStartSLOduration=3.107763053 podStartE2EDuration="1m1.056861476s" podCreationTimestamp="2026-01-20 16:33:53 +0000 UTC" firstStartedPulling="2026-01-20 16:33:55.490129723 +0000 UTC m=+153.734734529" lastFinishedPulling="2026-01-20 16:34:53.439228146 +0000 UTC m=+211.683832952" observedRunningTime="2026-01-20 16:34:54.054227862 +0000 UTC m=+212.298832678" watchObservedRunningTime="2026-01-20 16:34:54.056861476 +0000 UTC m=+212.301466282" Jan 20 16:34:54 crc kubenswrapper[4995]: I0120 16:34:54.090382 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:34:54 crc kubenswrapper[4995]: I0120 16:34:54.090432 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:34:55 crc kubenswrapper[4995]: I0120 16:34:55.129318 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-z4tw2" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="registry-server" probeResult="failure" output=< Jan 20 16:34:55 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 16:34:55 crc kubenswrapper[4995]: > Jan 20 16:34:55 crc kubenswrapper[4995]: I0120 16:34:55.183636 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59fd5"] Jan 20 16:34:55 crc kubenswrapper[4995]: I0120 16:34:55.183871 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-59fd5" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="registry-server" containerID="cri-o://f53e94e8e40871a068f70449f96f1544a8545b565e48c8db3581aee348c5c404" gracePeriod=2 Jan 20 16:34:56 crc kubenswrapper[4995]: I0120 16:34:56.959851 4995 generic.go:334] "Generic (PLEG): container finished" podID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerID="f53e94e8e40871a068f70449f96f1544a8545b565e48c8db3581aee348c5c404" exitCode=0 Jan 20 16:34:56 crc kubenswrapper[4995]: I0120 16:34:56.959913 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerDied","Data":"f53e94e8e40871a068f70449f96f1544a8545b565e48c8db3581aee348c5c404"} Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.541462 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.659446 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-utilities\") pod \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.659534 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mrc46\" (UniqueName: \"kubernetes.io/projected/f438d3ed-cdb7-438d-ba13-6ac749c18dea-kube-api-access-mrc46\") pod \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.659605 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-catalog-content\") pod \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\" (UID: \"f438d3ed-cdb7-438d-ba13-6ac749c18dea\") " Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.660938 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-utilities" (OuterVolumeSpecName: "utilities") pod "f438d3ed-cdb7-438d-ba13-6ac749c18dea" (UID: "f438d3ed-cdb7-438d-ba13-6ac749c18dea"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.666730 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f438d3ed-cdb7-438d-ba13-6ac749c18dea-kube-api-access-mrc46" (OuterVolumeSpecName: "kube-api-access-mrc46") pod "f438d3ed-cdb7-438d-ba13-6ac749c18dea" (UID: "f438d3ed-cdb7-438d-ba13-6ac749c18dea"). InnerVolumeSpecName "kube-api-access-mrc46". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.708701 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f438d3ed-cdb7-438d-ba13-6ac749c18dea" (UID: "f438d3ed-cdb7-438d-ba13-6ac749c18dea"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.761836 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mrc46\" (UniqueName: \"kubernetes.io/projected/f438d3ed-cdb7-438d-ba13-6ac749c18dea-kube-api-access-mrc46\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.761890 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.761903 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f438d3ed-cdb7-438d-ba13-6ac749c18dea-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.970105 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59fd5" event={"ID":"f438d3ed-cdb7-438d-ba13-6ac749c18dea","Type":"ContainerDied","Data":"185d75aef69bbdeebdf6279ce92a149c3f1b4377a6a77a6d1cd5fcca476d32dd"} Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.970186 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59fd5" Jan 20 16:34:57 crc kubenswrapper[4995]: I0120 16:34:57.970474 4995 scope.go:117] "RemoveContainer" containerID="f53e94e8e40871a068f70449f96f1544a8545b565e48c8db3581aee348c5c404" Jan 20 16:34:58 crc kubenswrapper[4995]: I0120 16:34:58.000373 4995 scope.go:117] "RemoveContainer" containerID="96c9b9241763ecce10b957e6f626525f611fc489f318843643efcc4213655b3f" Jan 20 16:34:58 crc kubenswrapper[4995]: I0120 16:34:58.021321 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59fd5"] Jan 20 16:34:58 crc kubenswrapper[4995]: I0120 16:34:58.021503 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-59fd5"] Jan 20 16:34:58 crc kubenswrapper[4995]: I0120 16:34:58.027666 4995 scope.go:117] "RemoveContainer" containerID="f55fb70576eb5594a3e573f7efaf839622bf1b3788f3236de9dc1d282c194e7a" Jan 20 16:34:59 crc kubenswrapper[4995]: I0120 16:34:59.988932 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-twpdq" event={"ID":"45b2120d-74d7-4b92-90c6-18b7bbe7375e","Type":"ContainerStarted","Data":"a34d9c4a769bd9623c680053b6dc85d2df654618691071aa051a41eda0d91b6d"} Jan 20 16:34:59 crc kubenswrapper[4995]: I0120 16:34:59.998617 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" path="/var/lib/kubelet/pods/f438d3ed-cdb7-438d-ba13-6ac749c18dea/volumes" Jan 20 16:35:00 crc kubenswrapper[4995]: I0120 16:35:00.015062 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-twpdq" podStartSLOduration=3.925241681 podStartE2EDuration="1m6.015044388s" podCreationTimestamp="2026-01-20 16:33:54 +0000 UTC" firstStartedPulling="2026-01-20 16:33:55.498941092 +0000 UTC m=+153.743545898" lastFinishedPulling="2026-01-20 16:34:57.588743799 +0000 UTC m=+215.833348605" observedRunningTime="2026-01-20 16:35:00.012056374 +0000 UTC m=+218.256661190" watchObservedRunningTime="2026-01-20 16:35:00.015044388 +0000 UTC m=+218.259649194" Jan 20 16:35:00 crc kubenswrapper[4995]: I0120 16:35:00.572049 4995 
patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:35:00 crc kubenswrapper[4995]: I0120 16:35:00.572169 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:35:00 crc kubenswrapper[4995]: I0120 16:35:00.572205 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:35:00 crc kubenswrapper[4995]: I0120 16:35:00.572689 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:35:00 crc kubenswrapper[4995]: I0120 16:35:00.572778 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142" gracePeriod=600 Jan 20 16:35:01 crc kubenswrapper[4995]: I0120 16:35:01.403469 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:35:01 crc kubenswrapper[4995]: I0120 16:35:01.403518 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:35:01 crc kubenswrapper[4995]: I0120 16:35:01.447343 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:35:01 crc kubenswrapper[4995]: I0120 16:35:01.540606 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:35:01 crc kubenswrapper[4995]: I0120 16:35:01.540663 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:35:01 crc kubenswrapper[4995]: I0120 16:35:01.585176 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:35:02 crc kubenswrapper[4995]: I0120 16:35:02.000481 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142" exitCode=0 Jan 20 16:35:02 crc kubenswrapper[4995]: I0120 16:35:02.000547 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142"} Jan 20 16:35:02 crc kubenswrapper[4995]: I0120 16:35:02.052872 4995 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:35:02 crc kubenswrapper[4995]: I0120 16:35:02.054053 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:35:02 crc kubenswrapper[4995]: I0120 16:35:02.389029 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-brxnm"] Jan 20 16:35:04 crc kubenswrapper[4995]: I0120 16:35:04.014799 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-brxnm" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="registry-server" containerID="cri-o://01ada4fc7b6e30c38e97ae488db67e4e2074da5beb4929946ead938bdfaa6d8b" gracePeriod=2 Jan 20 16:35:04 crc kubenswrapper[4995]: I0120 16:35:04.137073 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:35:04 crc kubenswrapper[4995]: I0120 16:35:04.186610 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:35:04 crc kubenswrapper[4995]: I0120 16:35:04.515388 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:35:04 crc kubenswrapper[4995]: I0120 16:35:04.516059 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:35:05 crc kubenswrapper[4995]: I0120 16:35:05.560783 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-twpdq" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="registry-server" probeResult="failure" output=< Jan 20 16:35:05 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 16:35:05 crc kubenswrapper[4995]: > Jan 20 16:35:06 crc kubenswrapper[4995]: I0120 16:35:06.026018 4995 generic.go:334] "Generic (PLEG): container finished" podID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerID="01ada4fc7b6e30c38e97ae488db67e4e2074da5beb4929946ead938bdfaa6d8b" exitCode=0 Jan 20 16:35:06 crc kubenswrapper[4995]: I0120 16:35:06.026118 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-brxnm" event={"ID":"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c","Type":"ContainerDied","Data":"01ada4fc7b6e30c38e97ae488db67e4e2074da5beb4929946ead938bdfaa6d8b"} Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.306366 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.408312 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-utilities\") pod \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.408403 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22dg9\" (UniqueName: \"kubernetes.io/projected/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-kube-api-access-22dg9\") pod \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.408456 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-catalog-content\") pod \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\" (UID: \"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c\") " Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.409426 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-utilities" (OuterVolumeSpecName: "utilities") pod "1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" (UID: "1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.416214 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-kube-api-access-22dg9" (OuterVolumeSpecName: "kube-api-access-22dg9") pod "1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" (UID: "1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c"). InnerVolumeSpecName "kube-api-access-22dg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.455295 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" (UID: "1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.509873 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.509913 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22dg9\" (UniqueName: \"kubernetes.io/projected/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-kube-api-access-22dg9\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:08 crc kubenswrapper[4995]: I0120 16:35:08.509929 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.045164 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-brxnm" event={"ID":"1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c","Type":"ContainerDied","Data":"db88e9f8590b6b4477cf2b2205804482fd541082af8781e40f7a4817754984b7"} Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.045235 4995 scope.go:117] "RemoveContainer" containerID="01ada4fc7b6e30c38e97ae488db67e4e2074da5beb4929946ead938bdfaa6d8b" Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.045281 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-brxnm" Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.067125 4995 scope.go:117] "RemoveContainer" containerID="6eec1cfdcd0d3816abce498bd735c715d0290ff4d427b4e01d17a7005959b07f" Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.087523 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-brxnm"] Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.092854 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-brxnm"] Jan 20 16:35:09 crc kubenswrapper[4995]: I0120 16:35:09.093401 4995 scope.go:117] "RemoveContainer" containerID="575d6e2443d123c6e77456581fa83c2258462c479142e8fa30425857f268a19e" Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.002222 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" path="/var/lib/kubelet/pods/1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c/volumes" Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.055338 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"bb947a8aacc0062f1c7243926da3eb2ca7a8e73c037234e2af5d64615ba3f08d"} Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.058293 4995 generic.go:334] "Generic (PLEG): container finished" podID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerID="86e776f3485911accb0959468f006208a76a730f203161fc2e3476195625060c" exitCode=0 Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.058361 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq48v" event={"ID":"85820e5b-3be3-43a1-a954-7ce3719e24b5","Type":"ContainerDied","Data":"86e776f3485911accb0959468f006208a76a730f203161fc2e3476195625060c"} Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.060647 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerID="a63a5ad96425fae4f1694dda17079a3c51568e08edafcb0cb1fb4541757b8abd" exitCode=0 Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.060693 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s79" event={"ID":"58893b4f-0622-48ee-bc1d-24ed2b499606","Type":"ContainerDied","Data":"a63a5ad96425fae4f1694dda17079a3c51568e08edafcb0cb1fb4541757b8abd"} Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.063647 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nmxgc" event={"ID":"b57345df-b284-4e63-b77d-f60534099876","Type":"ContainerStarted","Data":"14b30725d707cdde31224950cf38e488c7381ab7d755adef48f86a4bc08e96c6"} Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.126200 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nmxgc" podStartSLOduration=4.482805029 podStartE2EDuration="1m20.1261848s" podCreationTimestamp="2026-01-20 16:33:50 +0000 UTC" firstStartedPulling="2026-01-20 16:33:52.401386726 +0000 UTC m=+150.645991522" lastFinishedPulling="2026-01-20 16:35:08.044766487 +0000 UTC m=+226.289371293" observedRunningTime="2026-01-20 16:35:10.122354293 +0000 UTC m=+228.366959129" watchObservedRunningTime="2026-01-20 16:35:10.1261848 +0000 UTC m=+228.370789606" Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.470355 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw"] Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.470600 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" podUID="336c4a64-e3a7-451d-be5a-8a78334e3014" containerName="controller-manager" containerID="cri-o://f7206c1c90d09c2ee5905b24af328372d6a446a7b09da7da20d54328d9f9e02b" gracePeriod=30 Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.573283 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57598776b-7kng9"] Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.573654 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" podUID="5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" containerName="route-controller-manager" containerID="cri-o://37e8f8085c2c807e076588f197ebb39280f5e01518cbc52a6222631eb1075109" gracePeriod=30 Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.986145 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:35:10 crc kubenswrapper[4995]: I0120 16:35:10.986434 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.082375 4995 generic.go:334] "Generic (PLEG): container finished" podID="336c4a64-e3a7-451d-be5a-8a78334e3014" containerID="f7206c1c90d09c2ee5905b24af328372d6a446a7b09da7da20d54328d9f9e02b" exitCode=0 Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.082495 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" 
event={"ID":"336c4a64-e3a7-451d-be5a-8a78334e3014","Type":"ContainerDied","Data":"f7206c1c90d09c2ee5905b24af328372d6a446a7b09da7da20d54328d9f9e02b"} Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.084769 4995 generic.go:334] "Generic (PLEG): container finished" podID="5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" containerID="37e8f8085c2c807e076588f197ebb39280f5e01518cbc52a6222631eb1075109" exitCode=0 Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.084914 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" event={"ID":"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e","Type":"ContainerDied","Data":"37e8f8085c2c807e076588f197ebb39280f5e01518cbc52a6222631eb1075109"} Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.512946 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538339 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6d9449d755-dtx27"] Jan 20 16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538549 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="extract-utilities" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538564 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="extract-utilities" Jan 20 16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538575 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="registry-server" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538582 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="registry-server" Jan 20 16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538595 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="extract-content" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538601 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="extract-content" Jan 20 16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538612 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="extract-utilities" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538618 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="extract-utilities" Jan 20 16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538626 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="registry-server" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538633 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="registry-server" Jan 20 16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538646 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="extract-content" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538651 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="extract-content" Jan 20 
16:35:11 crc kubenswrapper[4995]: E0120 16:35:11.538660 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="336c4a64-e3a7-451d-be5a-8a78334e3014" containerName="controller-manager" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538666 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="336c4a64-e3a7-451d-be5a-8a78334e3014" containerName="controller-manager" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538755 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d61ecd0-1a0d-4e90-9ab6-49d15d546f6c" containerName="registry-server" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538765 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f438d3ed-cdb7-438d-ba13-6ac749c18dea" containerName="registry-server" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.538775 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="336c4a64-e3a7-451d-be5a-8a78334e3014" containerName="controller-manager" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.539188 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.541208 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.559428 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d9449d755-dtx27"] Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653540 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-proxy-ca-bundles\") pod \"336c4a64-e3a7-451d-be5a-8a78334e3014\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653575 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-client-ca\") pod \"336c4a64-e3a7-451d-be5a-8a78334e3014\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653594 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6hx7\" (UniqueName: \"kubernetes.io/projected/336c4a64-e3a7-451d-be5a-8a78334e3014-kube-api-access-t6hx7\") pod \"336c4a64-e3a7-451d-be5a-8a78334e3014\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653620 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-client-ca\") pod \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653654 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-config\") pod \"336c4a64-e3a7-451d-be5a-8a78334e3014\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653694 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-8jj4h\" (UniqueName: \"kubernetes.io/projected/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-kube-api-access-8jj4h\") pod \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653716 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-config\") pod \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653753 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-serving-cert\") pod \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\" (UID: \"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653773 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/336c4a64-e3a7-451d-be5a-8a78334e3014-serving-cert\") pod \"336c4a64-e3a7-451d-be5a-8a78334e3014\" (UID: \"336c4a64-e3a7-451d-be5a-8a78334e3014\") " Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653904 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-proxy-ca-bundles\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653929 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-config\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653963 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-serving-cert\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.653980 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgsww\" (UniqueName: \"kubernetes.io/projected/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-kube-api-access-mgsww\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.654013 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-client-ca\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.654381 4995 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-client-ca" (OuterVolumeSpecName: "client-ca") pod "336c4a64-e3a7-451d-be5a-8a78334e3014" (UID: "336c4a64-e3a7-451d-be5a-8a78334e3014"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.654405 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "336c4a64-e3a7-451d-be5a-8a78334e3014" (UID: "336c4a64-e3a7-451d-be5a-8a78334e3014"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.654460 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-config" (OuterVolumeSpecName: "config") pod "336c4a64-e3a7-451d-be5a-8a78334e3014" (UID: "336c4a64-e3a7-451d-be5a-8a78334e3014"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.654484 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-client-ca" (OuterVolumeSpecName: "client-ca") pod "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" (UID: "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.655010 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-config" (OuterVolumeSpecName: "config") pod "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" (UID: "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.658532 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-kube-api-access-8jj4h" (OuterVolumeSpecName: "kube-api-access-8jj4h") pod "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" (UID: "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e"). InnerVolumeSpecName "kube-api-access-8jj4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.661049 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/336c4a64-e3a7-451d-be5a-8a78334e3014-kube-api-access-t6hx7" (OuterVolumeSpecName: "kube-api-access-t6hx7") pod "336c4a64-e3a7-451d-be5a-8a78334e3014" (UID: "336c4a64-e3a7-451d-be5a-8a78334e3014"). InnerVolumeSpecName "kube-api-access-t6hx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.665572 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/336c4a64-e3a7-451d-be5a-8a78334e3014-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "336c4a64-e3a7-451d-be5a-8a78334e3014" (UID: "336c4a64-e3a7-451d-be5a-8a78334e3014"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.669235 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" (UID: "5c222fc0-ec8f-4403-9339-ee8d2da7fc5e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.754996 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-client-ca\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755112 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-proxy-ca-bundles\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755144 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-config\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755193 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-serving-cert\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755215 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgsww\" (UniqueName: \"kubernetes.io/projected/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-kube-api-access-mgsww\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755271 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755287 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jj4h\" (UniqueName: \"kubernetes.io/projected/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-kube-api-access-8jj4h\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755299 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755310 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755321 4995 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/336c4a64-e3a7-451d-be5a-8a78334e3014-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755332 4995 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755342 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/336c4a64-e3a7-451d-be5a-8a78334e3014-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755353 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6hx7\" (UniqueName: \"kubernetes.io/projected/336c4a64-e3a7-451d-be5a-8a78334e3014-kube-api-access-t6hx7\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.755363 4995 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.756759 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-client-ca\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.756834 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-proxy-ca-bundles\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.757547 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-config\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.759834 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-serving-cert\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 16:35:11.781750 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgsww\" (UniqueName: \"kubernetes.io/projected/d7d6474e-b1f9-4310-a3b6-a59b3eefdb11-kube-api-access-mgsww\") pod \"controller-manager-6d9449d755-dtx27\" (UID: \"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11\") " pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:11 crc kubenswrapper[4995]: I0120 
16:35:11.849826 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.048892 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nmxgc" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="registry-server" probeResult="failure" output=< Jan 20 16:35:12 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 16:35:12 crc kubenswrapper[4995]: > Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.063466 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6d9449d755-dtx27"] Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.090553 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" event={"ID":"336c4a64-e3a7-451d-be5a-8a78334e3014","Type":"ContainerDied","Data":"1e2196ea2a75d4c5db2b1d491acb898abc6972f5a040353bed6362a751e5903c"} Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.090612 4995 scope.go:117] "RemoveContainer" containerID="f7206c1c90d09c2ee5905b24af328372d6a446a7b09da7da20d54328d9f9e02b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.090711 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.095660 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" event={"ID":"5c222fc0-ec8f-4403-9339-ee8d2da7fc5e","Type":"ContainerDied","Data":"db8af03ccff4c4de8ac8ced9a00a3978ca751ef456256d06d41ef551e90564b7"} Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.095774 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57598776b-7kng9" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.099896 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq48v" event={"ID":"85820e5b-3be3-43a1-a954-7ce3719e24b5","Type":"ContainerStarted","Data":"35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984"} Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.107609 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s79" event={"ID":"58893b4f-0622-48ee-bc1d-24ed2b499606","Type":"ContainerStarted","Data":"ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1"} Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.111799 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" event={"ID":"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11","Type":"ContainerStarted","Data":"b70715151314b55b96aa622a65ec81f1f43924c1ca90065105ffa258d0695671"} Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.125298 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw"] Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.125315 4995 scope.go:117] "RemoveContainer" containerID="37e8f8085c2c807e076588f197ebb39280f5e01518cbc52a6222631eb1075109" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.128399 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8445f6f7cd-vxmcw"] Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.131480 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57598776b-7kng9"] Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.133131 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57598776b-7kng9"] Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.153239 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b"] Jan 20 16:35:12 crc kubenswrapper[4995]: E0120 16:35:12.153559 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" containerName="route-controller-manager" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.153578 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" containerName="route-controller-manager" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.153724 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" containerName="route-controller-manager" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.154280 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.155509 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-46s79" podStartSLOduration=2.618438223 podStartE2EDuration="1m19.155491374s" podCreationTimestamp="2026-01-20 16:33:53 +0000 UTC" firstStartedPulling="2026-01-20 16:33:54.503558947 +0000 UTC m=+152.748163753" lastFinishedPulling="2026-01-20 16:35:11.040612078 +0000 UTC m=+229.285216904" observedRunningTime="2026-01-20 16:35:12.147556651 +0000 UTC m=+230.392161467" watchObservedRunningTime="2026-01-20 16:35:12.155491374 +0000 UTC m=+230.400096180" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.158246 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.158277 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.158484 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.158561 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.158651 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.158758 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.161085 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b"] Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.164694 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bq48v" podStartSLOduration=3.642394544 podStartE2EDuration="1m20.164686741s" podCreationTimestamp="2026-01-20 16:33:52 +0000 UTC" firstStartedPulling="2026-01-20 16:33:54.469243876 +0000 UTC m=+152.713848672" lastFinishedPulling="2026-01-20 16:35:10.991536063 +0000 UTC m=+229.236140869" observedRunningTime="2026-01-20 16:35:12.164591489 +0000 UTC m=+230.409196295" watchObservedRunningTime="2026-01-20 16:35:12.164686741 +0000 UTC m=+230.409291547" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.260469 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-client-ca\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.260604 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vf4j\" (UniqueName: \"kubernetes.io/projected/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-kube-api-access-5vf4j\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " 
pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.260777 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-serving-cert\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.260801 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-config\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.361865 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-serving-cert\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.361936 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-config\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.361980 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-client-ca\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.362020 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vf4j\" (UniqueName: \"kubernetes.io/projected/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-kube-api-access-5vf4j\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.363042 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-client-ca\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.363998 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-config\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 
16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.367013 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-serving-cert\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.381032 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vf4j\" (UniqueName: \"kubernetes.io/projected/884e85c4-7ca4-4ac0-81c5-19fee4f0ae40-kube-api-access-5vf4j\") pod \"route-controller-manager-cb49c8d49-87l9b\" (UID: \"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40\") " pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.485227 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:12 crc kubenswrapper[4995]: I0120 16:35:12.906597 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b"] Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.068674 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.069013 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.117948 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.120162 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" event={"ID":"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40","Type":"ContainerStarted","Data":"f0244ecca488f86e4ff846454201138c4d47601d0c461286e43dbfa993ab58aa"} Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.120195 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" event={"ID":"884e85c4-7ca4-4ac0-81c5-19fee4f0ae40","Type":"ContainerStarted","Data":"6d2c410db6a4c2edd5747946010993a2b16992221bf1033aea4249a000d0db14"} Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.120581 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.122561 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" event={"ID":"d7d6474e-b1f9-4310-a3b6-a59b3eefdb11","Type":"ContainerStarted","Data":"b3d98fe8cee72e0e3d4819f03218792bd0a04397a9cd33663656f140d0b518f9"} Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.122774 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.123329 4995 patch_prober.go:28] interesting pod/route-controller-manager-cb49c8d49-87l9b container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe 
status=failure output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.123364 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" podUID="884e85c4-7ca4-4ac0-81c5-19fee4f0ae40" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.127476 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.152903 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" podStartSLOduration=3.152881596 podStartE2EDuration="3.152881596s" podCreationTimestamp="2026-01-20 16:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:35:13.151960511 +0000 UTC m=+231.396565317" watchObservedRunningTime="2026-01-20 16:35:13.152881596 +0000 UTC m=+231.397486412" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.469234 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.469290 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.512310 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:35:13 crc kubenswrapper[4995]: I0120 16:35:13.531787 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6d9449d755-dtx27" podStartSLOduration=3.531769055 podStartE2EDuration="3.531769055s" podCreationTimestamp="2026-01-20 16:35:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:35:13.169451121 +0000 UTC m=+231.414055927" watchObservedRunningTime="2026-01-20 16:35:13.531769055 +0000 UTC m=+231.776373861" Jan 20 16:35:14 crc kubenswrapper[4995]: I0120 16:35:14.019248 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="336c4a64-e3a7-451d-be5a-8a78334e3014" path="/var/lib/kubelet/pods/336c4a64-e3a7-451d-be5a-8a78334e3014/volumes" Jan 20 16:35:14 crc kubenswrapper[4995]: I0120 16:35:14.020317 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c222fc0-ec8f-4403-9339-ee8d2da7fc5e" path="/var/lib/kubelet/pods/5c222fc0-ec8f-4403-9339-ee8d2da7fc5e/volumes" Jan 20 16:35:14 crc kubenswrapper[4995]: I0120 16:35:14.138369 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-cb49c8d49-87l9b" Jan 20 16:35:14 crc kubenswrapper[4995]: I0120 16:35:14.528713 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:35:14 crc kubenswrapper[4995]: I0120 16:35:14.565667 4995 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:35:15 crc kubenswrapper[4995]: I0120 16:35:15.079258 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-8dc2q"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.009173 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w9bjs"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.009812 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w9bjs" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="registry-server" containerID="cri-o://deea17510e6347a6d2081e2674db926600caaf64989ea2497ceee70d68634953" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.016781 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nmxgc"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.017374 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nmxgc" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="registry-server" containerID="cri-o://14b30725d707cdde31224950cf38e488c7381ab7d755adef48f86a4bc08e96c6" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.025154 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t456c"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.025464 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerName="marketplace-operator" containerID="cri-o://596f4058dd61da3eab8cce9db52d78ca2e3c0ae240565ac7b8b66d04bb34609c" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.030749 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-46s79"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.030910 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-46s79" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="registry-server" containerID="cri-o://ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.036304 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1" cmd=["grpc_health_probe","-addr=:50051"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.036426 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-l2zqv"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.037064 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.042804 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1" cmd=["grpc_health_probe","-addr=:50051"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.044892 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bq48v"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.045129 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bq48v" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="registry-server" containerID="cri-o://35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.046426 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1" cmd=["grpc_health_probe","-addr=:50051"] Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.046466 4995 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-46s79" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="registry-server" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.046961 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz924\" (UniqueName: \"kubernetes.io/projected/9c2404e7-457d-4f79-814d-f6a44e88c749-kube-api-access-fz924\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.047060 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9c2404e7-457d-4f79-814d-f6a44e88c749-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.049177 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-twpdq"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.052201 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c2404e7-457d-4f79-814d-f6a44e88c749-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.055321 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-l2zqv"] Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 
16:35:16.056444 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984" cmd=["grpc_health_probe","-addr=:50051"] Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.058266 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984" cmd=["grpc_health_probe","-addr=:50051"] Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.059690 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-z4tw2"] Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.064226 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984" cmd=["grpc_health_probe","-addr=:50051"] Jan 20 16:35:16 crc kubenswrapper[4995]: E0120 16:35:16.064295 4995 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-bq48v" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="registry-server" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.065281 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-z4tw2" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="registry-server" containerID="cri-o://af416724f5827a2d74a7d1ca311faff4aadf4e39b8b1bbb1bc86dd87527e0558" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.140714 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-twpdq" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="registry-server" containerID="cri-o://a34d9c4a769bd9623c680053b6dc85d2df654618691071aa051a41eda0d91b6d" gracePeriod=30 Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.153251 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9c2404e7-457d-4f79-814d-f6a44e88c749-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.153320 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c2404e7-457d-4f79-814d-f6a44e88c749-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.153368 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fz924\" (UniqueName: \"kubernetes.io/projected/9c2404e7-457d-4f79-814d-f6a44e88c749-kube-api-access-fz924\") pod 
\"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.156133 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c2404e7-457d-4f79-814d-f6a44e88c749-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.162608 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9c2404e7-457d-4f79-814d-f6a44e88c749-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.169861 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz924\" (UniqueName: \"kubernetes.io/projected/9c2404e7-457d-4f79-814d-f6a44e88c749-kube-api-access-fz924\") pod \"marketplace-operator-79b997595-l2zqv\" (UID: \"9c2404e7-457d-4f79-814d-f6a44e88c749\") " pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.350732 4995 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-t456c container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.350784 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.21:8080/healthz\": dial tcp 10.217.0.21:8080: connect: connection refused" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.378565 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:16 crc kubenswrapper[4995]: I0120 16:35:16.789628 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-l2zqv"] Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.147162 4995 generic.go:334] "Generic (PLEG): container finished" podID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerID="a34d9c4a769bd9623c680053b6dc85d2df654618691071aa051a41eda0d91b6d" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.147237 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-twpdq" event={"ID":"45b2120d-74d7-4b92-90c6-18b7bbe7375e","Type":"ContainerDied","Data":"a34d9c4a769bd9623c680053b6dc85d2df654618691071aa051a41eda0d91b6d"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.147459 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-twpdq" event={"ID":"45b2120d-74d7-4b92-90c6-18b7bbe7375e","Type":"ContainerDied","Data":"c836fe7568026cb8af4baf60858ec3e2a3b54fec325504be90d3502bab5e28a4"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.147474 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c836fe7568026cb8af4baf60858ec3e2a3b54fec325504be90d3502bab5e28a4" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.149667 4995 generic.go:334] "Generic (PLEG): container finished" podID="7087f2d6-d879-419d-bd93-538d617dcc91" containerID="deea17510e6347a6d2081e2674db926600caaf64989ea2497ceee70d68634953" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.149729 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w9bjs" event={"ID":"7087f2d6-d879-419d-bd93-538d617dcc91","Type":"ContainerDied","Data":"deea17510e6347a6d2081e2674db926600caaf64989ea2497ceee70d68634953"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.150705 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" event={"ID":"9c2404e7-457d-4f79-814d-f6a44e88c749","Type":"ContainerStarted","Data":"69906e78caf75cff82d07f852ecefde2562279199c1d08c9acc1fd8a5e33b1b5"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.152774 4995 generic.go:334] "Generic (PLEG): container finished" podID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerID="35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.152810 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq48v" event={"ID":"85820e5b-3be3-43a1-a954-7ce3719e24b5","Type":"ContainerDied","Data":"35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.152825 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bq48v" event={"ID":"85820e5b-3be3-43a1-a954-7ce3719e24b5","Type":"ContainerDied","Data":"e16b43853f8629a9b0655c052979626641d6decaff012ddc2f1fb459994e452e"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.152835 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e16b43853f8629a9b0655c052979626641d6decaff012ddc2f1fb459994e452e" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.154714 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerID="ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.154770 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s79" event={"ID":"58893b4f-0622-48ee-bc1d-24ed2b499606","Type":"ContainerDied","Data":"ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.157898 4995 generic.go:334] "Generic (PLEG): container finished" podID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerID="af416724f5827a2d74a7d1ca311faff4aadf4e39b8b1bbb1bc86dd87527e0558" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.157953 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4tw2" event={"ID":"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0","Type":"ContainerDied","Data":"af416724f5827a2d74a7d1ca311faff4aadf4e39b8b1bbb1bc86dd87527e0558"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.159540 4995 generic.go:334] "Generic (PLEG): container finished" podID="b57345df-b284-4e63-b77d-f60534099876" containerID="14b30725d707cdde31224950cf38e488c7381ab7d755adef48f86a4bc08e96c6" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.159581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nmxgc" event={"ID":"b57345df-b284-4e63-b77d-f60534099876","Type":"ContainerDied","Data":"14b30725d707cdde31224950cf38e488c7381ab7d755adef48f86a4bc08e96c6"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.160644 4995 generic.go:334] "Generic (PLEG): container finished" podID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerID="596f4058dd61da3eab8cce9db52d78ca2e3c0ae240565ac7b8b66d04bb34609c" exitCode=0 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.160667 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" event={"ID":"10d3852f-ae68-471d-8501-a31f353ae0cd","Type":"ContainerDied","Data":"596f4058dd61da3eab8cce9db52d78ca2e3c0ae240565ac7b8b66d04bb34609c"} Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.280897 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.294374 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.469602 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-catalog-content\") pod \"85820e5b-3be3-43a1-a954-7ce3719e24b5\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.469702 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gtvf\" (UniqueName: \"kubernetes.io/projected/45b2120d-74d7-4b92-90c6-18b7bbe7375e-kube-api-access-5gtvf\") pod \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.469743 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-catalog-content\") pod \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.469774 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p86s\" (UniqueName: \"kubernetes.io/projected/85820e5b-3be3-43a1-a954-7ce3719e24b5-kube-api-access-4p86s\") pod \"85820e5b-3be3-43a1-a954-7ce3719e24b5\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.469790 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-utilities\") pod \"85820e5b-3be3-43a1-a954-7ce3719e24b5\" (UID: \"85820e5b-3be3-43a1-a954-7ce3719e24b5\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.469812 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-utilities\") pod \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\" (UID: \"45b2120d-74d7-4b92-90c6-18b7bbe7375e\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.471114 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-utilities" (OuterVolumeSpecName: "utilities") pod "85820e5b-3be3-43a1-a954-7ce3719e24b5" (UID: "85820e5b-3be3-43a1-a954-7ce3719e24b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.473251 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-utilities" (OuterVolumeSpecName: "utilities") pod "45b2120d-74d7-4b92-90c6-18b7bbe7375e" (UID: "45b2120d-74d7-4b92-90c6-18b7bbe7375e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.475813 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45b2120d-74d7-4b92-90c6-18b7bbe7375e-kube-api-access-5gtvf" (OuterVolumeSpecName: "kube-api-access-5gtvf") pod "45b2120d-74d7-4b92-90c6-18b7bbe7375e" (UID: "45b2120d-74d7-4b92-90c6-18b7bbe7375e"). InnerVolumeSpecName "kube-api-access-5gtvf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.475948 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85820e5b-3be3-43a1-a954-7ce3719e24b5-kube-api-access-4p86s" (OuterVolumeSpecName: "kube-api-access-4p86s") pod "85820e5b-3be3-43a1-a954-7ce3719e24b5" (UID: "85820e5b-3be3-43a1-a954-7ce3719e24b5"). InnerVolumeSpecName "kube-api-access-4p86s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.504631 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85820e5b-3be3-43a1-a954-7ce3719e24b5" (UID: "85820e5b-3be3-43a1-a954-7ce3719e24b5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.531168 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.570819 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.570850 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gtvf\" (UniqueName: \"kubernetes.io/projected/45b2120d-74d7-4b92-90c6-18b7bbe7375e-kube-api-access-5gtvf\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.570861 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p86s\" (UniqueName: \"kubernetes.io/projected/85820e5b-3be3-43a1-a954-7ce3719e24b5-kube-api-access-4p86s\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.570869 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85820e5b-3be3-43a1-a954-7ce3719e24b5-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.570877 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.587881 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-twpdq"] Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.653369 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "45b2120d-74d7-4b92-90c6-18b7bbe7375e" (UID: "45b2120d-74d7-4b92-90c6-18b7bbe7375e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.672308 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-utilities\") pod \"7087f2d6-d879-419d-bd93-538d617dcc91\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.672619 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-catalog-content\") pod \"7087f2d6-d879-419d-bd93-538d617dcc91\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.672652 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcjdk\" (UniqueName: \"kubernetes.io/projected/7087f2d6-d879-419d-bd93-538d617dcc91-kube-api-access-zcjdk\") pod \"7087f2d6-d879-419d-bd93-538d617dcc91\" (UID: \"7087f2d6-d879-419d-bd93-538d617dcc91\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.672865 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45b2120d-74d7-4b92-90c6-18b7bbe7375e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.673505 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-utilities" (OuterVolumeSpecName: "utilities") pod "7087f2d6-d879-419d-bd93-538d617dcc91" (UID: "7087f2d6-d879-419d-bd93-538d617dcc91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.677504 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7087f2d6-d879-419d-bd93-538d617dcc91-kube-api-access-zcjdk" (OuterVolumeSpecName: "kube-api-access-zcjdk") pod "7087f2d6-d879-419d-bd93-538d617dcc91" (UID: "7087f2d6-d879-419d-bd93-538d617dcc91"). InnerVolumeSpecName "kube-api-access-zcjdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.678304 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685367 4995 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685651 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685671 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685682 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685688 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685696 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685725 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685735 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685741 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685752 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685757 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685805 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685812 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685822 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685828 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685837 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685842 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685850 4995 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685856 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685862 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685885 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685894 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685900 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="extract-utilities" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.685907 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.685912 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="extract-content" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686055 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686070 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686101 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686107 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57345df-b284-4e63-b77d-f60534099876" containerName="registry-server" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686526 4995 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686762 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1" gracePeriod=15 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686810 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8" gracePeriod=15 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686861 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" 
containerID="cri-o://39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb" gracePeriod=15 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686810 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64" gracePeriod=15 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.686978 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a" gracePeriod=15 Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688273 4995 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688478 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688490 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688501 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688507 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688520 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688527 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688535 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688541 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688572 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688582 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688590 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688596 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 16:35:17 crc kubenswrapper[4995]: E0120 16:35:17.688606 4995 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688612 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688708 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688718 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688729 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.688738 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.689804 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.689820 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.689599 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.694367 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.729323 4995 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.742109 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.763914 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7087f2d6-d879-419d-bd93-538d617dcc91" (UID: "7087f2d6-d879-419d-bd93-538d617dcc91"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.773887 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.773927 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7087f2d6-d879-419d-bd93-538d617dcc91-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.773941 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcjdk\" (UniqueName: \"kubernetes.io/projected/7087f2d6-d879-419d-bd93-538d617dcc91-kube-api-access-zcjdk\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.838518 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.838966 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.839337 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.852947 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.853602 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.853977 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.854427 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.874871 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-trusted-ca\") pod \"10d3852f-ae68-471d-8501-a31f353ae0cd\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.874927 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-utilities\") pod \"b57345df-b284-4e63-b77d-f60534099876\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.874969 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fd2t2\" (UniqueName: \"kubernetes.io/projected/10d3852f-ae68-471d-8501-a31f353ae0cd-kube-api-access-fd2t2\") pod \"10d3852f-ae68-471d-8501-a31f353ae0cd\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.874999 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9t9wt\" (UniqueName: \"kubernetes.io/projected/b57345df-b284-4e63-b77d-f60534099876-kube-api-access-9t9wt\") pod \"b57345df-b284-4e63-b77d-f60534099876\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875056 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-catalog-content\") pod \"b57345df-b284-4e63-b77d-f60534099876\" (UID: \"b57345df-b284-4e63-b77d-f60534099876\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875121 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-operator-metrics\") pod \"10d3852f-ae68-471d-8501-a31f353ae0cd\" (UID: \"10d3852f-ae68-471d-8501-a31f353ae0cd\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875589 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875627 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875645 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875671 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875652 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "10d3852f-ae68-471d-8501-a31f353ae0cd" (UID: "10d3852f-ae68-471d-8501-a31f353ae0cd"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875693 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875711 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875715 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-utilities" (OuterVolumeSpecName: "utilities") pod "b57345df-b284-4e63-b77d-f60534099876" (UID: "b57345df-b284-4e63-b77d-f60534099876"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875891 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.875933 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.876008 4995 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.876023 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.879264 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "10d3852f-ae68-471d-8501-a31f353ae0cd" (UID: "10d3852f-ae68-471d-8501-a31f353ae0cd"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.879337 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10d3852f-ae68-471d-8501-a31f353ae0cd-kube-api-access-fd2t2" (OuterVolumeSpecName: "kube-api-access-fd2t2") pod "10d3852f-ae68-471d-8501-a31f353ae0cd" (UID: "10d3852f-ae68-471d-8501-a31f353ae0cd"). InnerVolumeSpecName "kube-api-access-fd2t2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.880353 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57345df-b284-4e63-b77d-f60534099876-kube-api-access-9t9wt" (OuterVolumeSpecName: "kube-api-access-9t9wt") pod "b57345df-b284-4e63-b77d-f60534099876" (UID: "b57345df-b284-4e63-b77d-f60534099876"). InnerVolumeSpecName "kube-api-access-9t9wt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.927457 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b57345df-b284-4e63-b77d-f60534099876" (UID: "b57345df-b284-4e63-b77d-f60534099876"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977260 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2wq7\" (UniqueName: \"kubernetes.io/projected/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-kube-api-access-n2wq7\") pod \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977327 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxmqb\" (UniqueName: \"kubernetes.io/projected/58893b4f-0622-48ee-bc1d-24ed2b499606-kube-api-access-qxmqb\") pod \"58893b4f-0622-48ee-bc1d-24ed2b499606\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977377 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-catalog-content\") pod \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977395 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-utilities\") pod \"58893b4f-0622-48ee-bc1d-24ed2b499606\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977427 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-catalog-content\") pod \"58893b4f-0622-48ee-bc1d-24ed2b499606\" (UID: \"58893b4f-0622-48ee-bc1d-24ed2b499606\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977462 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-utilities\") pod \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\" (UID: \"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0\") " Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977566 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977588 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977616 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977632 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977650 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977675 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977692 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.977725 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.978448 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-utilities" (OuterVolumeSpecName: "utilities") pod "affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" (UID: "affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.979683 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-utilities" (OuterVolumeSpecName: "utilities") pod "58893b4f-0622-48ee-bc1d-24ed2b499606" (UID: "58893b4f-0622-48ee-bc1d-24ed2b499606"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980270 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b57345df-b284-4e63-b77d-f60534099876-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980583 4995 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/10d3852f-ae68-471d-8501-a31f353ae0cd-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980379 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980429 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980707 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-kube-api-access-n2wq7" (OuterVolumeSpecName: "kube-api-access-n2wq7") pod "affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" (UID: "affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0"). InnerVolumeSpecName "kube-api-access-n2wq7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980439 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980453 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980445 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980490 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980654 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fd2t2\" (UniqueName: \"kubernetes.io/projected/10d3852f-ae68-471d-8501-a31f353ae0cd-kube-api-access-fd2t2\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980827 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9t9wt\" (UniqueName: \"kubernetes.io/projected/b57345df-b284-4e63-b77d-f60534099876-kube-api-access-9t9wt\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980397 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.980466 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:17 crc kubenswrapper[4995]: I0120 16:35:17.981515 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58893b4f-0622-48ee-bc1d-24ed2b499606-kube-api-access-qxmqb" (OuterVolumeSpecName: "kube-api-access-qxmqb") pod "58893b4f-0622-48ee-bc1d-24ed2b499606" (UID: "58893b4f-0622-48ee-bc1d-24ed2b499606"). InnerVolumeSpecName "kube-api-access-qxmqb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.010577 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58893b4f-0622-48ee-bc1d-24ed2b499606" (UID: "58893b4f-0622-48ee-bc1d-24ed2b499606"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.043489 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:35:18 crc kubenswrapper[4995]: W0120 16:35:18.061819 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-f886825a65c58ccdb04f932c15814198a07284bc455817711cb1f8b6d7c99af0 WatchSource:0}: Error finding container f886825a65c58ccdb04f932c15814198a07284bc455817711cb1f8b6d7c99af0: Status 404 returned error can't find the container with id f886825a65c58ccdb04f932c15814198a07284bc455817711cb1f8b6d7c99af0 Jan 20 16:35:18 crc kubenswrapper[4995]: E0120 16:35:18.064191 4995 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.143:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188c7da8e8cb2386 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-20 16:35:18.063367046 +0000 UTC m=+236.307971852,LastTimestamp:2026-01-20 16:35:18.063367046 +0000 UTC m=+236.307971852,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.082412 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.082981 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2wq7\" (UniqueName: \"kubernetes.io/projected/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-kube-api-access-n2wq7\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.082997 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxmqb\" (UniqueName: \"kubernetes.io/projected/58893b4f-0622-48ee-bc1d-24ed2b499606-kube-api-access-qxmqb\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.083011 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:18 crc 
kubenswrapper[4995]: I0120 16:35:18.083022 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58893b4f-0622-48ee-bc1d-24ed2b499606-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.139608 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" (UID: "affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.167860 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.168434 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" event={"ID":"10d3852f-ae68-471d-8501-a31f353ae0cd","Type":"ContainerDied","Data":"c03c5750fe8641bd8c6c30c1bae26f1503440d5cc9ed4afae5e79605dbf892db"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.168464 4995 scope.go:117] "RemoveContainer" containerID="596f4058dd61da3eab8cce9db52d78ca2e3c0ae240565ac7b8b66d04bb34609c" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.169275 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.169660 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.170193 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.170702 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.174548 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nmxgc" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.174577 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nmxgc" event={"ID":"b57345df-b284-4e63-b77d-f60534099876","Type":"ContainerDied","Data":"908d0169fc8074d8798d93527fa5a71467ed74dc5cca5cabcf16f5a03ba0f7a0"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.175166 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.175512 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.175796 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.176256 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.176693 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.176950 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.177231 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.178363 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.178566 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.179324 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.179666 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.180109 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.180638 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.181063 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.181415 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.181450 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.182237 4995 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8" exitCode=0 Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.182253 4995 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a" exitCode=0 Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.182284 4995 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64" exitCode=0 Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.182291 4995 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb" exitCode=2 Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.186040 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.187630 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w9bjs" event={"ID":"7087f2d6-d879-419d-bd93-538d617dcc91","Type":"ContainerDied","Data":"b05e2a30b27c31c5276f3d98e49c4bb30339518122a37efbb4ca4f9d078c3458"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.187657 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w9bjs" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.188755 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.189010 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.189523 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.190350 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.190700 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.190996 4995 status_manager.go:851] "Failed to get status for pod" 
podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.191498 4995 scope.go:117] "RemoveContainer" containerID="14b30725d707cdde31224950cf38e488c7381ab7d755adef48f86a4bc08e96c6" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.191498 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.191738 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.192014 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-z4tw2" event={"ID":"affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0","Type":"ContainerDied","Data":"ea848ff5a0b74dcf4478bf439d09714f7e5755476db3dd18efd629b975b95e68"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.192027 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.192045 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-z4tw2" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.192391 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.193223 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.193959 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.194749 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.194949 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.195291 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.195391 4995 generic.go:334] "Generic (PLEG): container finished" podID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" containerID="b0f5655a3bc0a706fd14ae5f3ebc225b155bc17171c6b2019a4922130d279b61" exitCode=0 Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.195455 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"758090f5-45ed-44f6-9fdc-0af9eac7d6ea","Type":"ContainerDied","Data":"b0f5655a3bc0a706fd14ae5f3ebc225b155bc17171c6b2019a4922130d279b61"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.195513 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.195951 4995 status_manager.go:851] 
"Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.196635 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"f886825a65c58ccdb04f932c15814198a07284bc455817711cb1f8b6d7c99af0"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.196759 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.197164 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.197581 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.198033 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.198526 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.198501 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" event={"ID":"9c2404e7-457d-4f79-814d-f6a44e88c749","Type":"ContainerStarted","Data":"08cf864e8c71c27726c93a1b81735d2090ee2477edac65ebfcccb24728715516"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.198694 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.199006 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: 
connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.199206 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.199526 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.202634 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.202878 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.203105 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.203301 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.203813 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.204039 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.204227 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.204408 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.205052 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bq48v" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.205144 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-46s79" event={"ID":"58893b4f-0622-48ee-bc1d-24ed2b499606","Type":"ContainerDied","Data":"7b7e2788485e95537778c6e07cedc9b4279ff4ba9a3b70d58b6eef3f958084fa"} Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.205202 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-46s79" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.205439 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-twpdq" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.205573 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.206088 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.206523 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.206998 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.207264 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.207833 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.208209 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.208545 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.208985 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.209215 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.209399 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.212365 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.212876 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.213485 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.213840 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" 
pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.214472 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.215120 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.215794 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.216386 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.216999 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.217423 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.217901 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.220884 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.221877 4995 status_manager.go:851] "Failed to get 
status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.222227 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.223379 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.223865 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.224053 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.224138 4995 scope.go:117] "RemoveContainer" containerID="2c4fb876e9dc017f6f61c20ccc99277383c0add22a61c1bb27d698fdc039acba" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.224331 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.227487 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.237192 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.239698 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.241482 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.242600 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.243691 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.244652 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.245207 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.245438 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.245694 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.246011 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.246219 4995 status_manager.go:851] "Failed to get status for pod" 
podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.260853 4995 scope.go:117] "RemoveContainer" containerID="e94f307c6deeaf0dca03c62c6ee702a15e7971894a14ac597f10e6abf8a5fa36" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.285246 4995 scope.go:117] "RemoveContainer" containerID="c2655976b306ee59a42aea4d9ff9eaa31f4028bf6d7a935fbdf7ca518bce0acd" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.317300 4995 scope.go:117] "RemoveContainer" containerID="deea17510e6347a6d2081e2674db926600caaf64989ea2497ceee70d68634953" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.334669 4995 scope.go:117] "RemoveContainer" containerID="4514eb97b79ac3d97e7e923975e792a3337eef4e862008949de64ce0f8275e63" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.362091 4995 scope.go:117] "RemoveContainer" containerID="fc9cd0d4b1c1cc00110675615b53d9bbd17ada7cfdb7e49584e80ddbfa40f31a" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.376345 4995 scope.go:117] "RemoveContainer" containerID="af416724f5827a2d74a7d1ca311faff4aadf4e39b8b1bbb1bc86dd87527e0558" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.388931 4995 scope.go:117] "RemoveContainer" containerID="f52ea2b342c8caff3972eeda0448f4608556b8476789ae6e2cfcedad8e66d64e" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.408384 4995 scope.go:117] "RemoveContainer" containerID="592bcc41ec7013de722f599526a52bc4ee19f36671744ae6b10b2ee8dbf7d5f2" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.426426 4995 scope.go:117] "RemoveContainer" containerID="ab5f422472a7141fb8e77c5148de87bc75f96a5a33d220881bf8a972fa76cea1" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.439019 4995 scope.go:117] "RemoveContainer" containerID="a63a5ad96425fae4f1694dda17079a3c51568e08edafcb0cb1fb4541757b8abd" Jan 20 16:35:18 crc kubenswrapper[4995]: I0120 16:35:18.449768 4995 scope.go:117] "RemoveContainer" containerID="5333c9e48b15a0c2a42dfb3199feab7e7aa721556f72c2c30bc6f04e5e742e06" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.216995 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a"} Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.219364 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.219602 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.219931 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" 
pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.220286 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.220324 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.220886 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.221182 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.221659 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.222001 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.222267 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.222556 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.646277 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.647580 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.648059 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.648597 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.649051 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.649500 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.649895 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.650382 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.650956 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.651301 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.651701 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.703843 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-var-lock\") pod \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.703902 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kubelet-dir\") pod \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.703972 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kube-api-access\") pod \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\" (UID: \"758090f5-45ed-44f6-9fdc-0af9eac7d6ea\") " Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.704023 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "758090f5-45ed-44f6-9fdc-0af9eac7d6ea" (UID: "758090f5-45ed-44f6-9fdc-0af9eac7d6ea"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.704016 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-var-lock" (OuterVolumeSpecName: "var-lock") pod "758090f5-45ed-44f6-9fdc-0af9eac7d6ea" (UID: "758090f5-45ed-44f6-9fdc-0af9eac7d6ea"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.704481 4995 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-var-lock\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.704532 4995 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.713423 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "758090f5-45ed-44f6-9fdc-0af9eac7d6ea" (UID: "758090f5-45ed-44f6-9fdc-0af9eac7d6ea"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:19 crc kubenswrapper[4995]: I0120 16:35:19.805568 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/758090f5-45ed-44f6-9fdc-0af9eac7d6ea-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.166521 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.167654 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.168208 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.168471 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.168683 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.168936 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.169171 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.169392 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.169605 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: 
connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.169823 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.170107 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.170414 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.170603 4995 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.213983 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214038 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214142 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214160 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214251 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214385 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214572 4995 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214596 4995 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.214609 4995 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.230986 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"758090f5-45ed-44f6-9fdc-0af9eac7d6ea","Type":"ContainerDied","Data":"74667134e68478dbfc8ebf12d28f24c124fa99e82ecc176c173078bc77af398f"} Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.231020 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74667134e68478dbfc8ebf12d28f24c124fa99e82ecc176c173078bc77af398f" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.231029 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.234279 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.234985 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.235405 4995 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1" exitCode=0 Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.235465 4995 scope.go:117] "RemoveContainer" containerID="1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.235475 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.235454 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.236443 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.237327 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.237646 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.237929 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.238305 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.238663 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.238973 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.239268 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.239500 4995 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.248699 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.249201 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.249393 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.249588 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.249783 4995 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.249983 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.250201 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.250416 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" 
pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.250620 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.250811 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.251000 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.253311 4995 scope.go:117] "RemoveContainer" containerID="d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.266276 4995 scope.go:117] "RemoveContainer" containerID="c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.281863 4995 scope.go:117] "RemoveContainer" containerID="39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.296366 4995 scope.go:117] "RemoveContainer" containerID="12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.313024 4995 scope.go:117] "RemoveContainer" containerID="99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.330258 4995 scope.go:117] "RemoveContainer" containerID="1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8" Jan 20 16:35:20 crc kubenswrapper[4995]: E0120 16:35:20.330737 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\": container with ID starting with 1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8 not found: ID does not exist" containerID="1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.330791 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8"} err="failed to get container status \"1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\": rpc error: code = NotFound desc = could not find container \"1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8\": container with ID starting with 1264a7c9fb6b4bdde18d84c36999c6fa3349e302d6a4d20c30ed48992857f0a8 not 
found: ID does not exist" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.330826 4995 scope.go:117] "RemoveContainer" containerID="d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a" Jan 20 16:35:20 crc kubenswrapper[4995]: E0120 16:35:20.331190 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\": container with ID starting with d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a not found: ID does not exist" containerID="d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.331223 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a"} err="failed to get container status \"d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\": rpc error: code = NotFound desc = could not find container \"d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a\": container with ID starting with d362fcb0a965bb39f53d862376af656f235677a5a367d7e432d5834c3b32543a not found: ID does not exist" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.331241 4995 scope.go:117] "RemoveContainer" containerID="c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64" Jan 20 16:35:20 crc kubenswrapper[4995]: E0120 16:35:20.331488 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\": container with ID starting with c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64 not found: ID does not exist" containerID="c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.331515 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64"} err="failed to get container status \"c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\": rpc error: code = NotFound desc = could not find container \"c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64\": container with ID starting with c9dfa013d176fe13b1d913e20fde03735867236425c0c3889bc0d5e300a02c64 not found: ID does not exist" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.331532 4995 scope.go:117] "RemoveContainer" containerID="39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb" Jan 20 16:35:20 crc kubenswrapper[4995]: E0120 16:35:20.331764 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\": container with ID starting with 39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb not found: ID does not exist" containerID="39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.331792 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb"} err="failed to get container status \"39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\": rpc error: code = NotFound desc = could not find container 
\"39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb\": container with ID starting with 39282cba63785e421b60039ad13d825b49c06ed8436df6fbce89db75377253fb not found: ID does not exist" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.331809 4995 scope.go:117] "RemoveContainer" containerID="12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1" Jan 20 16:35:20 crc kubenswrapper[4995]: E0120 16:35:20.332050 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\": container with ID starting with 12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1 not found: ID does not exist" containerID="12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.332106 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1"} err="failed to get container status \"12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\": rpc error: code = NotFound desc = could not find container \"12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1\": container with ID starting with 12b54a4291fd40f90513411c4bf349d1b01c0a8bc6f4d9dd0ac6f41113f980e1 not found: ID does not exist" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.332125 4995 scope.go:117] "RemoveContainer" containerID="99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3" Jan 20 16:35:20 crc kubenswrapper[4995]: E0120 16:35:20.332360 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\": container with ID starting with 99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3 not found: ID does not exist" containerID="99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3" Jan 20 16:35:20 crc kubenswrapper[4995]: I0120 16:35:20.332388 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3"} err="failed to get container status \"99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\": rpc error: code = NotFound desc = could not find container \"99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3\": container with ID starting with 99784b55168781ccd7655a0c1ec2d5881cac3eaa1a27ff2dd39dc28ab6c9e2e3 not found: ID does not exist" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.014109 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.015568 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.016236 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.016813 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.017342 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.017988 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.018557 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.018932 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.019446 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.020009 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.020489 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: I0120 16:35:22.020952 4995 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:22 crc kubenswrapper[4995]: E0120 16:35:22.669352 4995 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.143:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188c7da8e8cb2386 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-20 16:35:18.063367046 +0000 UTC m=+236.307971852,LastTimestamp:2026-01-20 16:35:18.063367046 +0000 UTC m=+236.307971852,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.445165 4995 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.446224 4995 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.446590 4995 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.447035 4995 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.447462 4995 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.447528 4995 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.447829 4995 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="200ms" Jan 20 16:35:27 crc kubenswrapper[4995]: E0120 16:35:27.648268 4995 controller.go:145] "Failed to ensure lease exists, 
will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="400ms" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.989417 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.990182 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.991207 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.992813 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.993363 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.993837 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.994474 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.994944 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.996573 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": 
dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.997181 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:27 crc kubenswrapper[4995]: I0120 16:35:27.997513 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:28 crc kubenswrapper[4995]: I0120 16:35:28.004384 4995 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:28 crc kubenswrapper[4995]: I0120 16:35:28.004414 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:28 crc kubenswrapper[4995]: E0120 16:35:28.004861 4995 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:28 crc kubenswrapper[4995]: I0120 16:35:28.005326 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:28 crc kubenswrapper[4995]: W0120 16:35:28.036107 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-24962316f63a2795a7bdb2252bf195291ea9a8fc2cbb3437d2eb1c5fe9eedd00 WatchSource:0}: Error finding container 24962316f63a2795a7bdb2252bf195291ea9a8fc2cbb3437d2eb1c5fe9eedd00: Status 404 returned error can't find the container with id 24962316f63a2795a7bdb2252bf195291ea9a8fc2cbb3437d2eb1c5fe9eedd00 Jan 20 16:35:28 crc kubenswrapper[4995]: E0120 16:35:28.049201 4995 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="800ms" Jan 20 16:35:28 crc kubenswrapper[4995]: I0120 16:35:28.280915 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"24962316f63a2795a7bdb2252bf195291ea9a8fc2cbb3437d2eb1c5fe9eedd00"} Jan 20 16:35:28 crc kubenswrapper[4995]: E0120 16:35:28.850686 4995 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="1.6s" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.289019 4995 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" 
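The controller.go:145 records above retry the node-lease ensure call with a doubling interval: 200ms, 400ms, 800ms, then 1.6s, after five flat-rate update attempts have already failed. A minimal Go sketch of that exponential-backoff loop (illustrative; ensureLease and the four-attempt bound are assumptions for the sketch, not the kubelet's lease controller):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // ensureLease is a hypothetical stand-in for the API call that keeps
    // failing with "connect: connection refused" in the records above.
    func ensureLease() error { return errors.New("connection refused") }

    func main() {
    	interval := 200 * time.Millisecond
    	for i := 0; i < 4; i++ {
    		if err := ensureLease(); err != nil {
    			// Matches the logged progression: 200ms, 400ms, 800ms, 1.6s.
    			fmt.Printf("Failed to ensure lease exists, will retry interval=%q err=%v\n", interval, err)
    			time.Sleep(interval)
    			interval *= 2
    			continue
    		}
    		return
    	}
    }

Doubling the wait keeps a dead API server from being hammered while still reconnecting quickly once it comes back, which is visible above: the retries stop as soon as kube-apiserver-crc's containers start.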
containerID="71b7241c0602bc8ee0b092dc447001cedd8a1c01f38796aab078d58f0968bb8f" exitCode=0 Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.289062 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"71b7241c0602bc8ee0b092dc447001cedd8a1c01f38796aab078d58f0968bb8f"} Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.289571 4995 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.289902 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.290063 4995 status_manager.go:851] "Failed to get status for pod" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" pod="openshift-marketplace/redhat-marketplace-bq48v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bq48v\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: E0120 16:35:29.290688 4995 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.290655 4995 status_manager.go:851] "Failed to get status for pod" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" pod="openshift-marketplace/marketplace-operator-79b997595-t456c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-t456c\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.291444 4995 status_manager.go:851] "Failed to get status for pod" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" pod="openshift-marketplace/redhat-operators-z4tw2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-z4tw2\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.291931 4995 status_manager.go:851] "Failed to get status for pod" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" pod="openshift-marketplace/redhat-operators-twpdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-twpdq\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.293487 4995 status_manager.go:851] "Failed to get status for pod" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.293898 4995 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: 
connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.294399 4995 status_manager.go:851] "Failed to get status for pod" podUID="b57345df-b284-4e63-b77d-f60534099876" pod="openshift-marketplace/community-operators-nmxgc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-nmxgc\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.294841 4995 status_manager.go:851] "Failed to get status for pod" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" pod="openshift-marketplace/certified-operators-w9bjs" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-w9bjs\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.295389 4995 status_manager.go:851] "Failed to get status for pod" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" pod="openshift-marketplace/redhat-marketplace-46s79" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-46s79\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:29 crc kubenswrapper[4995]: I0120 16:35:29.295833 4995 status_manager.go:851] "Failed to get status for pod" podUID="9c2404e7-457d-4f79-814d-f6a44e88c749" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-l2zqv\": dial tcp 38.102.83.143:6443: connect: connection refused" Jan 20 16:35:30 crc kubenswrapper[4995]: I0120 16:35:30.299376 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"53d503190eaa0c29fe97b81314d8a907f226a21ba68ff1f48a0378da9ca3b47c"} Jan 20 16:35:30 crc kubenswrapper[4995]: I0120 16:35:30.299829 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"8f3bb7108805bb37a873de211248c2f3de8239dfbdef89532b0bc84a2fdb3e77"} Jan 20 16:35:31 crc kubenswrapper[4995]: I0120 16:35:31.310514 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5a3567801d518cca1f1c52a202759af717b51c70ae841ed7afc9b2520150e09c"} Jan 20 16:35:31 crc kubenswrapper[4995]: I0120 16:35:31.310910 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"197f37f9cdb2987690cc99e607b5d9c261d77826eec0a99c39ccceed06fd78c1"} Jan 20 16:35:31 crc kubenswrapper[4995]: I0120 16:35:31.310927 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e7d0951aefe56be4cc4dbdeeb544f7ce3c9b2170af30f04c424ad7d29cba032c"} Jan 20 16:35:31 crc kubenswrapper[4995]: I0120 16:35:31.310983 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:31 crc kubenswrapper[4995]: I0120 16:35:31.311159 4995 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:31 crc kubenswrapper[4995]: I0120 16:35:31.311194 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:32 crc kubenswrapper[4995]: I0120 16:35:32.318638 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 20 16:35:32 crc kubenswrapper[4995]: I0120 16:35:32.318701 4995 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4" exitCode=1 Jan 20 16:35:32 crc kubenswrapper[4995]: I0120 16:35:32.318739 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4"} Jan 20 16:35:32 crc kubenswrapper[4995]: I0120 16:35:32.319426 4995 scope.go:117] "RemoveContainer" containerID="c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4" Jan 20 16:35:33 crc kubenswrapper[4995]: I0120 16:35:33.005458 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:33 crc kubenswrapper[4995]: I0120 16:35:33.005517 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:33 crc kubenswrapper[4995]: I0120 16:35:33.014470 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:33 crc kubenswrapper[4995]: I0120 16:35:33.331441 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 20 16:35:33 crc kubenswrapper[4995]: I0120 16:35:33.331519 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"411ae16791dd1c539512c48733da9913f68217c1757a8eacc81c8925e777a371"} Jan 20 16:35:36 crc kubenswrapper[4995]: I0120 16:35:36.330238 4995 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:36 crc kubenswrapper[4995]: I0120 16:35:36.563626 4995 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="45760428-03b4-4d5e-87d8-04d7f6271b01" Jan 20 16:35:37 crc kubenswrapper[4995]: I0120 16:35:37.357331 4995 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:37 crc kubenswrapper[4995]: I0120 16:35:37.357724 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:37 crc kubenswrapper[4995]: I0120 16:35:37.361990 4995 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" 
podUID="45760428-03b4-4d5e-87d8-04d7f6271b01" Jan 20 16:35:37 crc kubenswrapper[4995]: I0120 16:35:37.366524 4995 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://8f3bb7108805bb37a873de211248c2f3de8239dfbdef89532b0bc84a2fdb3e77" Jan 20 16:35:37 crc kubenswrapper[4995]: I0120 16:35:37.366550 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:38 crc kubenswrapper[4995]: I0120 16:35:38.202960 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:35:38 crc kubenswrapper[4995]: I0120 16:35:38.203223 4995 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 20 16:35:38 crc kubenswrapper[4995]: I0120 16:35:38.203305 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 20 16:35:38 crc kubenswrapper[4995]: I0120 16:35:38.363868 4995 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:38 crc kubenswrapper[4995]: I0120 16:35:38.363912 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:38 crc kubenswrapper[4995]: I0120 16:35:38.366946 4995 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="45760428-03b4-4d5e-87d8-04d7f6271b01" Jan 20 16:35:39 crc kubenswrapper[4995]: I0120 16:35:39.394550 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.117580 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" podUID="2a8fc6c3-ef12-4e57-a446-0cfed712d95e" containerName="oauth-openshift" containerID="cri-o://b4df384ccf60edc369c96829c54731583511fb6ab481c63a0cf09303e8edf188" gracePeriod=15 Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.376770 4995 generic.go:334] "Generic (PLEG): container finished" podID="2a8fc6c3-ef12-4e57-a446-0cfed712d95e" containerID="b4df384ccf60edc369c96829c54731583511fb6ab481c63a0cf09303e8edf188" exitCode=0 Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.376957 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" event={"ID":"2a8fc6c3-ef12-4e57-a446-0cfed712d95e","Type":"ContainerDied","Data":"b4df384ccf60edc369c96829c54731583511fb6ab481c63a0cf09303e8edf188"} Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.617563 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.696402 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-error\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.696478 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-router-certs\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.696793 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.696517 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-dir\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697287 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tp775\" (UniqueName: \"kubernetes.io/projected/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-kube-api-access-tp775\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697371 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-policies\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697415 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-provider-selection\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697484 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-login\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697563 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-serving-cert\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc 
kubenswrapper[4995]: I0120 16:35:40.697644 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-service-ca\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697702 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-ocp-branding-template\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697749 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-session\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697799 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-trusted-ca-bundle\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697868 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-idp-0-file-data\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.697930 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-cliconfig\") pod \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\" (UID: \"2a8fc6c3-ef12-4e57-a446-0cfed712d95e\") " Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.698378 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.698546 4995 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.698578 4995 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.698815 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.699244 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.699309 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.704279 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-kube-api-access-tp775" (OuterVolumeSpecName: "kube-api-access-tp775") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "kube-api-access-tp775". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.705676 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.705849 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.706126 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.706456 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.707511 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.708535 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.709361 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.712344 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "2a8fc6c3-ef12-4e57-a446-0cfed712d95e" (UID: "2a8fc6c3-ef12-4e57-a446-0cfed712d95e"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801126 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801194 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801226 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tp775\" (UniqueName: \"kubernetes.io/projected/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-kube-api-access-tp775\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801256 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801293 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801319 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801345 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801372 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801399 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801425 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801449 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:40 crc kubenswrapper[4995]: I0120 16:35:40.801474 4995 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/2a8fc6c3-ef12-4e57-a446-0cfed712d95e-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 20 16:35:41 crc kubenswrapper[4995]: I0120 16:35:41.392469 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" event={"ID":"2a8fc6c3-ef12-4e57-a446-0cfed712d95e","Type":"ContainerDied","Data":"dcad1e59f7803f8c1f41ddc891113131dbfd8dc7b89f435ed160280f1b03755e"} Jan 20 16:35:41 crc kubenswrapper[4995]: I0120 16:35:41.392609 4995 scope.go:117] "RemoveContainer" containerID="b4df384ccf60edc369c96829c54731583511fb6ab481c63a0cf09303e8edf188" Jan 20 16:35:41 crc kubenswrapper[4995]: I0120 16:35:41.392783 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-8dc2q" Jan 20 16:35:43 crc kubenswrapper[4995]: I0120 16:35:43.119527 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 20 16:35:43 crc kubenswrapper[4995]: I0120 16:35:43.769013 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 20 16:35:44 crc kubenswrapper[4995]: I0120 16:35:44.008943 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 20 16:35:44 crc kubenswrapper[4995]: I0120 16:35:44.351520 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 16:35:44 crc kubenswrapper[4995]: I0120 16:35:44.809825 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 20 16:35:44 crc kubenswrapper[4995]: I0120 16:35:44.837495 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 20 16:35:45 crc kubenswrapper[4995]: I0120 16:35:45.869744 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 16:35:46 crc kubenswrapper[4995]: I0120 16:35:46.190281 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 20 16:35:46 crc kubenswrapper[4995]: I0120 16:35:46.212390 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 20 16:35:46 crc kubenswrapper[4995]: I0120 16:35:46.598134 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 20 16:35:47 crc kubenswrapper[4995]: I0120 16:35:47.052908 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 20 16:35:47 crc kubenswrapper[4995]: I0120 16:35:47.192350 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 20 16:35:47 crc kubenswrapper[4995]: I0120 16:35:47.479105 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 20 16:35:47 crc kubenswrapper[4995]: I0120 16:35:47.539884 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 20 16:35:47 crc kubenswrapper[4995]: I0120 16:35:47.739107 4995 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 16:35:47 crc kubenswrapper[4995]: I0120 16:35:47.883286 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.203682 4995 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.203765 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.265468 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.377478 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.494451 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.792222 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.792618 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.803939 4995 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.837188 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.869141 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.875104 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 20 16:35:48 crc kubenswrapper[4995]: I0120 16:35:48.969942 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 20 16:35:49 crc kubenswrapper[4995]: I0120 16:35:49.090236 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 20 16:35:49 crc kubenswrapper[4995]: I0120 16:35:49.260531 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 20 16:35:49 crc kubenswrapper[4995]: I0120 16:35:49.651960 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.059595 4995 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.082551 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.158033 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.196412 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.386043 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.455280 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.726188 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.757601 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.792462 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.868461 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.934144 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 20 16:35:50 crc kubenswrapper[4995]: I0120 16:35:50.994718 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.253664 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.266454 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.751889 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.759719 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.799264 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.811206 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 20 16:35:51 crc kubenswrapper[4995]: I0120 16:35:51.890408 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.018882 4995 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.062125 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.143701 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.147042 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.313954 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.341124 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.455046 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.457198 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.461246 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.490544 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.721989 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.872674 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.925674 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 20 16:35:52 crc kubenswrapper[4995]: I0120 16:35:52.988343 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.013475 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.115990 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.269169 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.292517 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.401263 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.459626 4995 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.472813 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.504663 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.545237 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.725609 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.738259 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.774154 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.915788 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 20 16:35:53 crc kubenswrapper[4995]: I0120 16:35:53.963468 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.044597 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.127352 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.186749 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.388450 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.396378 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.472825 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.564402 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.593248 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.634887 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.635305 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.799566 4995 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-oauth-apiserver"/"audit-1" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.827465 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.882026 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.886500 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.899579 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.912020 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.924423 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 20 16:35:54 crc kubenswrapper[4995]: I0120 16:35:54.956995 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.011867 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.025620 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.064939 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.101686 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.130588 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.137418 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.241800 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.288364 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.415638 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.472289 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.501874 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.504585 4995 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.555049 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.646520 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.650198 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.670067 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.765597 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.769220 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.839521 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.908690 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 20 16:35:55 crc kubenswrapper[4995]: I0120 16:35:55.945827 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.020724 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.039582 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.083820 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.256152 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.325660 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.396947 4995 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.433894 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.481941 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.503802 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 20 16:35:56 crc kubenswrapper[4995]: 
I0120 16:35:56.515238 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.589178 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.589735 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.619631 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.649044 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.651057 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.660434 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.681374 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.718204 4995 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.724310 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=39.724283073 podStartE2EDuration="39.724283073s" podCreationTimestamp="2026-01-20 16:35:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:35:36.391323721 +0000 UTC m=+254.635928577" watchObservedRunningTime="2026-01-20 16:35:56.724283073 +0000 UTC m=+274.968887919"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.724786 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-l2zqv" podStartSLOduration=40.724774517 podStartE2EDuration="40.724774517s" podCreationTimestamp="2026-01-20 16:35:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:35:36.481040375 +0000 UTC m=+254.725645191" watchObservedRunningTime="2026-01-20 16:35:56.724774517 +0000 UTC m=+274.969379363"
Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.727488 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-t456c","openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/redhat-operators-z4tw2","openshift-marketplace/certified-operators-w9bjs","openshift-marketplace/community-operators-nmxgc","openshift-marketplace/redhat-marketplace-bq48v","openshift-marketplace/redhat-operators-twpdq","openshift-authentication/oauth-openshift-558db77b4-8dc2q","openshift-marketplace/redhat-marketplace-46s79"]
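
Note on the two pod_startup_latency_tracker entries above: the podStartSLOduration value is consistent with watchObservedRunningTime minus podCreationTimestamp, i.e. 16:35:56.724283073 - 16:35:17 = 39.724283073 s for kube-apiserver-startup-monitor-crc and 16:35:56.724774517 - 16:35:16 = 40.724774517 s for marketplace-operator-79b997595-l2zqv, matching the logged values. The zero-valued firstStartedPulling/lastFinishedPulling timestamps (0001-01-01) indicate no image pull was observed for either pod.
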
pods=["openshift-authentication/oauth-openshift-766989869-rz6qj","openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728396 4995 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728434 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3574abb5-159d-4e7c-b894-7233c1798084" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728484 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" containerName="installer" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728506 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" containerName="installer" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728517 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="extract-utilities" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728523 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="extract-utilities" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728532 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="extract-content" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728538 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="extract-content" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728552 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="extract-content" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728559 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="extract-content" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728571 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerName="marketplace-operator" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728579 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerName="marketplace-operator" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728587 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="registry-server" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728596 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="registry-server" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728605 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="extract-utilities" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728611 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="extract-utilities" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728618 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a8fc6c3-ef12-4e57-a446-0cfed712d95e" containerName="oauth-openshift" Jan 20 16:35:56 crc 
kubenswrapper[4995]: I0120 16:35:56.728623 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a8fc6c3-ef12-4e57-a446-0cfed712d95e" containerName="oauth-openshift" Jan 20 16:35:56 crc kubenswrapper[4995]: E0120 16:35:56.728632 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="registry-server" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728639 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="registry-server" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728721 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a8fc6c3-ef12-4e57-a446-0cfed712d95e" containerName="oauth-openshift" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728730 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" containerName="registry-server" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728742 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="758090f5-45ed-44f6-9fdc-0af9eac7d6ea" containerName="installer" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728752 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" containerName="registry-server" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.728760 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" containerName="marketplace-operator" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.729151 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.731791 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.732024 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.733299 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.734102 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.734606 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.734763 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.735424 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.736028 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.736214 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.736377 4995 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.737032 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.737217 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.737357 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.743580 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.746504 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.755263 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.760403 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=20.760387877 podStartE2EDuration="20.760387877s" podCreationTimestamp="2026-01-20 16:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:35:56.756677365 +0000 UTC m=+275.001282191" watchObservedRunningTime="2026-01-20 16:35:56.760387877 +0000 UTC m=+275.004992683" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.814396 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842260 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-error\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842321 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842347 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-audit-policies\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842367 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-login\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842394 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842477 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842592 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-session\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842655 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-serving-cert\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842710 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fff63-1343-494d-a71b-f1226f430876-audit-dir\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842732 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842823 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-router-certs\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 
16:35:56.842845 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-service-ca\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842881 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt7ws\" (UniqueName: \"kubernetes.io/projected/2a9fff63-1343-494d-a71b-f1226f430876-kube-api-access-nt7ws\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.842910 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-cliconfig\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.900666 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.943860 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.943923 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-audit-policies\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.943942 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-login\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.943984 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.943999 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-idp-0-file-data\") 
pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944017 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-session\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944601 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944700 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-serving-cert\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944732 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fff63-1343-494d-a71b-f1226f430876-audit-dir\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944881 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944772 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-audit-policies\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.945006 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-router-certs\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.945342 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-service-ca\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.945412 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt7ws\" (UniqueName: 
\"kubernetes.io/projected/2a9fff63-1343-494d-a71b-f1226f430876-kube-api-access-nt7ws\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.945452 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-cliconfig\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.945512 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-error\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.945287 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.944823 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2a9fff63-1343-494d-a71b-f1226f430876-audit-dir\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.946034 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-service-ca\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.946257 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-cliconfig\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.949578 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.950154 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.950217 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-login\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.950595 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-serving-cert\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.951119 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-template-error\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.952638 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-router-certs\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.959703 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-system-session\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.961096 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt7ws\" (UniqueName: \"kubernetes.io/projected/2a9fff63-1343-494d-a71b-f1226f430876-kube-api-access-nt7ws\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:56 crc kubenswrapper[4995]: I0120 16:35:56.962687 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/2a9fff63-1343-494d-a71b-f1226f430876-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-766989869-rz6qj\" (UID: \"2a9fff63-1343-494d-a71b-f1226f430876\") " pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.064816 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.064816 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-766989869-rz6qj"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.105128 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.127392 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.177487 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.239047 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.303674 4995 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.372672 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.429490 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.567616 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.571823 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.597195 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.624868 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.746924 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.747420 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.757827 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.764489 4995 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.764685 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a" gracePeriod=5
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.847692 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.914499 4995 reflector.go:368] Caches populated for *v1.Secret from
object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.994863 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10d3852f-ae68-471d-8501-a31f353ae0cd" path="/var/lib/kubelet/pods/10d3852f-ae68-471d-8501-a31f353ae0cd/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.995522 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a8fc6c3-ef12-4e57-a446-0cfed712d95e" path="/var/lib/kubelet/pods/2a8fc6c3-ef12-4e57-a446-0cfed712d95e/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.996015 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45b2120d-74d7-4b92-90c6-18b7bbe7375e" path="/var/lib/kubelet/pods/45b2120d-74d7-4b92-90c6-18b7bbe7375e/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.996576 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58893b4f-0622-48ee-bc1d-24ed2b499606" path="/var/lib/kubelet/pods/58893b4f-0622-48ee-bc1d-24ed2b499606/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.997125 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7087f2d6-d879-419d-bd93-538d617dcc91" path="/var/lib/kubelet/pods/7087f2d6-d879-419d-bd93-538d617dcc91/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.997648 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85820e5b-3be3-43a1-a954-7ce3719e24b5" path="/var/lib/kubelet/pods/85820e5b-3be3-43a1-a954-7ce3719e24b5/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.998182 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0" path="/var/lib/kubelet/pods/affe9bc2-72cb-4ee2-a2f5-6c570ccb8dc0/volumes" Jan 20 16:35:57 crc kubenswrapper[4995]: I0120 16:35:57.998713 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57345df-b284-4e63-b77d-f60534099876" path="/var/lib/kubelet/pods/b57345df-b284-4e63-b77d-f60534099876/volumes" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.098789 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.137488 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.203767 4995 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.203814 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.203875 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.204318 4995 kuberuntime_manager.go:1027] "Message for Container of 
pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"411ae16791dd1c539512c48733da9913f68217c1757a8eacc81c8925e777a371"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.204408 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://411ae16791dd1c539512c48733da9913f68217c1757a8eacc81c8925e777a371" gracePeriod=30 Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.232233 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.397627 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.420483 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.421752 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.541427 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.613476 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.774219 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.830548 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.880484 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 20 16:35:58 crc kubenswrapper[4995]: I0120 16:35:58.986386 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.096652 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.164675 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.179867 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.188917 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.205501 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.379597 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.435181 4995 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.461140 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.461150 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.504658 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.597389 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.643483 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.663415 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.804108 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.832064 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 20 16:35:59 crc kubenswrapper[4995]: I0120 16:35:59.963320 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Jan 20 16:36:00 crc kubenswrapper[4995]: E0120 16:36:00.181994 4995 log.go:32] "RunPodSandbox from runtime service failed" err=<
Jan 20 16:36:00 crc kubenswrapper[4995]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-766989869-rz6qj_openshift-authentication_2a9fff63-1343-494d-a71b-f1226f430876_0(b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161): error adding pod openshift-authentication_oauth-openshift-766989869-rz6qj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161" Netns:"/var/run/netns/40a8d711-bd3d-42c4-882f-903d49c82304" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-766989869-rz6qj;K8S_POD_INFRA_CONTAINER_ID=b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161;K8S_POD_UID=2a9fff63-1343-494d-a71b-f1226f430876" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-766989869-rz6qj] networking: Multus: [openshift-authentication/oauth-openshift-766989869-rz6qj/2a9fff63-1343-494d-a71b-f1226f430876]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-766989869-rz6qj in out of cluster comm: pod "oauth-openshift-766989869-rz6qj" not found
16:36:00 crc kubenswrapper[4995]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 20 16:36:00 crc kubenswrapper[4995]: > Jan 20 16:36:00 crc kubenswrapper[4995]: E0120 16:36:00.182361 4995 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 20 16:36:00 crc kubenswrapper[4995]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-766989869-rz6qj_openshift-authentication_2a9fff63-1343-494d-a71b-f1226f430876_0(b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161): error adding pod openshift-authentication_oauth-openshift-766989869-rz6qj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161" Netns:"/var/run/netns/40a8d711-bd3d-42c4-882f-903d49c82304" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-766989869-rz6qj;K8S_POD_INFRA_CONTAINER_ID=b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161;K8S_POD_UID=2a9fff63-1343-494d-a71b-f1226f430876" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-766989869-rz6qj] networking: Multus: [openshift-authentication/oauth-openshift-766989869-rz6qj/2a9fff63-1343-494d-a71b-f1226f430876]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-766989869-rz6qj in out of cluster comm: pod "oauth-openshift-766989869-rz6qj" not found Jan 20 16:36:00 crc kubenswrapper[4995]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 20 16:36:00 crc kubenswrapper[4995]: > pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:36:00 crc kubenswrapper[4995]: E0120 16:36:00.182381 4995 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Jan 20 16:36:00 crc kubenswrapper[4995]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-766989869-rz6qj_openshift-authentication_2a9fff63-1343-494d-a71b-f1226f430876_0(b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161): error adding pod openshift-authentication_oauth-openshift-766989869-rz6qj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161" Netns:"/var/run/netns/40a8d711-bd3d-42c4-882f-903d49c82304" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-766989869-rz6qj;K8S_POD_INFRA_CONTAINER_ID=b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161;K8S_POD_UID=2a9fff63-1343-494d-a71b-f1226f430876" Path:"" ERRORED: error configuring 
pod [openshift-authentication/oauth-openshift-766989869-rz6qj] networking: Multus: [openshift-authentication/oauth-openshift-766989869-rz6qj/2a9fff63-1343-494d-a71b-f1226f430876]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-766989869-rz6qj in out of cluster comm: pod "oauth-openshift-766989869-rz6qj" not found Jan 20 16:36:00 crc kubenswrapper[4995]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 20 16:36:00 crc kubenswrapper[4995]: > pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:36:00 crc kubenswrapper[4995]: E0120 16:36:00.182442 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-766989869-rz6qj_openshift-authentication(2a9fff63-1343-494d-a71b-f1226f430876)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-766989869-rz6qj_openshift-authentication(2a9fff63-1343-494d-a71b-f1226f430876)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-766989869-rz6qj_openshift-authentication_2a9fff63-1343-494d-a71b-f1226f430876_0(b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161): error adding pod openshift-authentication_oauth-openshift-766989869-rz6qj to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161\\\" Netns:\\\"/var/run/netns/40a8d711-bd3d-42c4-882f-903d49c82304\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-766989869-rz6qj;K8S_POD_INFRA_CONTAINER_ID=b71fe7b98efeb3ba634decfaf564a8b70fc7fcb78112ff9c7790f98f7005f161;K8S_POD_UID=2a9fff63-1343-494d-a71b-f1226f430876\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-766989869-rz6qj] networking: Multus: [openshift-authentication/oauth-openshift-766989869-rz6qj/2a9fff63-1343-494d-a71b-f1226f430876]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-766989869-rz6qj in out of cluster comm: pod \\\"oauth-openshift-766989869-rz6qj\\\" not found\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" podUID="2a9fff63-1343-494d-a71b-f1226f430876" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.346905 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 
16:36:00.445906 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.518303 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.597861 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.601243 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.629055 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.665040 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.710751 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.914258 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 20 16:36:00 crc kubenswrapper[4995]: I0120 16:36:00.957970 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.100240 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.180817 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.284015 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.300432 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.400292 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.456719 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.539772 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.541193 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.588319 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.836158 4995 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 20 16:36:01 crc 
kubenswrapper[4995]: I0120 16:36:01.859353 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.859640 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 20 16:36:01 crc kubenswrapper[4995]: I0120 16:36:01.950776 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.027528 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.091833 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.151108 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.234355 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.243151 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.265620 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.336231 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.367745 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.432323 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.433793 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.565420 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.617674 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.643270 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.676266 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 20 16:36:02 crc kubenswrapper[4995]: I0120 16:36:02.855905 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.072673 4995 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.216612 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.317822 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.318118 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.330452 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.346015 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.424909 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.424966 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425017 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425020 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425034 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425070 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425175 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). 
InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425185 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425277 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425906 4995 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425935 4995 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425948 4995 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.425958 4995 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.435845 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.499001 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.527120 4995 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.537397 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.537458 4995 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a" exitCode=137 Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.537512 4995 scope.go:117] "RemoveContainer" containerID="d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.537650 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.559812 4995 scope.go:117] "RemoveContainer" containerID="d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a" Jan 20 16:36:03 crc kubenswrapper[4995]: E0120 16:36:03.561801 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a\": container with ID starting with d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a not found: ID does not exist" containerID="d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.561865 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a"} err="failed to get container status \"d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a\": rpc error: code = NotFound desc = could not find container \"d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a\": container with ID starting with d673e25a0a34431dbf6999bc1236a53176c851381b94ed02d35a24bd3137b30a not found: ID does not exist" Jan 20 16:36:03 crc kubenswrapper[4995]: I0120 16:36:03.999248 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.000497 4995 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.012194 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.012289 4995 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="0e361a4d-db2a-42a5-9283-f9ab2996f3de" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 
16:36:04.015938 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.015977 4995 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="0e361a4d-db2a-42a5-9283-f9ab2996f3de" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.089005 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.127831 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.568481 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.599701 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 20 16:36:04 crc kubenswrapper[4995]: I0120 16:36:04.800302 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 20 16:36:05 crc kubenswrapper[4995]: I0120 16:36:05.719966 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 20 16:36:05 crc kubenswrapper[4995]: I0120 16:36:05.818762 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 20 16:36:05 crc kubenswrapper[4995]: I0120 16:36:05.854743 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 20 16:36:06 crc kubenswrapper[4995]: I0120 16:36:06.293373 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 20 16:36:06 crc kubenswrapper[4995]: I0120 16:36:06.489858 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 20 16:36:14 crc kubenswrapper[4995]: I0120 16:36:14.988615 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:36:14 crc kubenswrapper[4995]: I0120 16:36:14.989315 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:36:15 crc kubenswrapper[4995]: I0120 16:36:15.392579 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-766989869-rz6qj"] Jan 20 16:36:15 crc kubenswrapper[4995]: I0120 16:36:15.607751 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" event={"ID":"2a9fff63-1343-494d-a71b-f1226f430876","Type":"ContainerStarted","Data":"8d0bb58be3390d84ccdc4a2f2bdbe2b22530bee68505e578b777aed07c9eed86"} Jan 20 16:36:16 crc kubenswrapper[4995]: I0120 16:36:16.617985 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" event={"ID":"2a9fff63-1343-494d-a71b-f1226f430876","Type":"ContainerStarted","Data":"2411882aec9966a3669177378a920ca9be865df297e15bc5ee65a02b04340c61"} Jan 20 16:36:16 crc kubenswrapper[4995]: I0120 16:36:16.619889 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:36:16 crc kubenswrapper[4995]: I0120 16:36:16.627932 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" Jan 20 16:36:16 crc kubenswrapper[4995]: I0120 16:36:16.642723 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-766989869-rz6qj" podStartSLOduration=61.642703903 podStartE2EDuration="1m1.642703903s" podCreationTimestamp="2026-01-20 16:35:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:36:16.640464652 +0000 UTC m=+294.885069498" watchObservedRunningTime="2026-01-20 16:36:16.642703903 +0000 UTC m=+294.887308709" Jan 20 16:36:21 crc kubenswrapper[4995]: I0120 16:36:21.874956 4995 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 20 16:36:28 crc kubenswrapper[4995]: I0120 16:36:28.707880 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 20 16:36:28 crc kubenswrapper[4995]: I0120 16:36:28.713659 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 20 16:36:28 crc kubenswrapper[4995]: I0120 16:36:28.713749 4995 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="411ae16791dd1c539512c48733da9913f68217c1757a8eacc81c8925e777a371" exitCode=137 Jan 20 16:36:28 crc kubenswrapper[4995]: I0120 16:36:28.713809 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"411ae16791dd1c539512c48733da9913f68217c1757a8eacc81c8925e777a371"} Jan 20 16:36:28 crc kubenswrapper[4995]: I0120 16:36:28.713868 4995 scope.go:117] "RemoveContainer" containerID="c889bf83d3e567901248ed92b467259502dab610c3540625816080d05ed794d4" Jan 20 16:36:29 crc kubenswrapper[4995]: I0120 16:36:29.730330 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 20 16:36:29 crc kubenswrapper[4995]: I0120 16:36:29.731832 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"836e2a7bfe4ef793e9ca85e8721b135b8707c3a57814b45ab4c125ed152d51d2"} Jan 20 16:36:30 crc kubenswrapper[4995]: I0120 16:36:30.874222 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-62wzq"] Jan 20 16:36:30 crc kubenswrapper[4995]: E0120 16:36:30.874718 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 20 16:36:30 crc kubenswrapper[4995]: I0120 16:36:30.874729 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 20 16:36:30 crc kubenswrapper[4995]: I0120 16:36:30.874830 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 20 16:36:30 crc kubenswrapper[4995]: I0120 16:36:30.875478 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:30 crc kubenswrapper[4995]: I0120 16:36:30.877700 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 20 16:36:30 crc kubenswrapper[4995]: I0120 16:36:30.885611 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-62wzq"] Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.023601 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gqkx\" (UniqueName: \"kubernetes.io/projected/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-kube-api-access-7gqkx\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.023711 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-catalog-content\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.023746 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-utilities\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.069253 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c5n9d"] Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.070341 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.073130 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.090891 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c5n9d"] Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.125262 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-catalog-content\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.125486 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-utilities\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.125643 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gqkx\" (UniqueName: \"kubernetes.io/projected/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-kube-api-access-7gqkx\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.126259 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-catalog-content\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.126288 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-utilities\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.155000 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gqkx\" (UniqueName: \"kubernetes.io/projected/9539d617-3abb-4dd5-aa3a-f9f6dd8615bb-kube-api-access-7gqkx\") pod \"redhat-marketplace-62wzq\" (UID: \"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb\") " pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.197830 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.227625 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-catalog-content\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.227692 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-utilities\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.227767 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw2p9\" (UniqueName: \"kubernetes.io/projected/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-kube-api-access-mw2p9\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.328418 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-catalog-content\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.328838 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-utilities\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.328876 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw2p9\" (UniqueName: \"kubernetes.io/projected/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-kube-api-access-mw2p9\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.329395 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-utilities\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.334306 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-catalog-content\") pod \"redhat-operators-c5n9d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.362510 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw2p9\" (UniqueName: \"kubernetes.io/projected/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-kube-api-access-mw2p9\") pod \"redhat-operators-c5n9d\" (UID: 
\"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.397278 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.603719 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-62wzq"] Jan 20 16:36:31 crc kubenswrapper[4995]: W0120 16:36:31.608053 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9539d617_3abb_4dd5_aa3a_f9f6dd8615bb.slice/crio-67f1968221af6965000e3349add849c8e44f53a97318845f762fecdb2b60d493 WatchSource:0}: Error finding container 67f1968221af6965000e3349add849c8e44f53a97318845f762fecdb2b60d493: Status 404 returned error can't find the container with id 67f1968221af6965000e3349add849c8e44f53a97318845f762fecdb2b60d493 Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.746774 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-62wzq" event={"ID":"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb","Type":"ContainerStarted","Data":"67f1968221af6965000e3349add849c8e44f53a97318845f762fecdb2b60d493"} Jan 20 16:36:31 crc kubenswrapper[4995]: I0120 16:36:31.775132 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c5n9d"] Jan 20 16:36:31 crc kubenswrapper[4995]: W0120 16:36:31.778044 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f05a60b_593a_44c2_8c7e_0e7b1d86a15d.slice/crio-8afa0c7fd184ff3a545b6aa401d2955aa2c0d67e4ffe2bf6239eb902e804915e WatchSource:0}: Error finding container 8afa0c7fd184ff3a545b6aa401d2955aa2c0d67e4ffe2bf6239eb902e804915e: Status 404 returned error can't find the container with id 8afa0c7fd184ff3a545b6aa401d2955aa2c0d67e4ffe2bf6239eb902e804915e Jan 20 16:36:32 crc kubenswrapper[4995]: I0120 16:36:32.761368 4995 generic.go:334] "Generic (PLEG): container finished" podID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerID="69f87e0ecac3b1ad4ad2bd46512c44d7f6fa8d0336b2f36939c68c21869ecd44" exitCode=0 Jan 20 16:36:32 crc kubenswrapper[4995]: I0120 16:36:32.761469 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerDied","Data":"69f87e0ecac3b1ad4ad2bd46512c44d7f6fa8d0336b2f36939c68c21869ecd44"} Jan 20 16:36:32 crc kubenswrapper[4995]: I0120 16:36:32.762307 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerStarted","Data":"8afa0c7fd184ff3a545b6aa401d2955aa2c0d67e4ffe2bf6239eb902e804915e"} Jan 20 16:36:32 crc kubenswrapper[4995]: I0120 16:36:32.764993 4995 generic.go:334] "Generic (PLEG): container finished" podID="9539d617-3abb-4dd5-aa3a-f9f6dd8615bb" containerID="c5556adcfaaf7b97a30ef344e4dc6e6801fd445525f28a8e5457f55e9b0f3ce1" exitCode=0 Jan 20 16:36:32 crc kubenswrapper[4995]: I0120 16:36:32.765040 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-62wzq" event={"ID":"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb","Type":"ContainerDied","Data":"c5556adcfaaf7b97a30ef344e4dc6e6801fd445525f28a8e5457f55e9b0f3ce1"} Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 
16:36:33.273381 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x4fxr"] Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.275176 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.278662 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.282173 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x4fxr"] Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.466520 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbmv9\" (UniqueName: \"kubernetes.io/projected/04b5f989-6750-4e5c-8ded-4af0bf07325b-kube-api-access-qbmv9\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.466935 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04b5f989-6750-4e5c-8ded-4af0bf07325b-utilities\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.467122 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04b5f989-6750-4e5c-8ded-4af0bf07325b-catalog-content\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.473898 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4w5vv"] Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.476202 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.480513 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4w5vv"] Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.481368 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.568614 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbmv9\" (UniqueName: \"kubernetes.io/projected/04b5f989-6750-4e5c-8ded-4af0bf07325b-kube-api-access-qbmv9\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.568662 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04b5f989-6750-4e5c-8ded-4af0bf07325b-utilities\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.568709 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04b5f989-6750-4e5c-8ded-4af0bf07325b-catalog-content\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.569487 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04b5f989-6750-4e5c-8ded-4af0bf07325b-catalog-content\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.569707 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04b5f989-6750-4e5c-8ded-4af0bf07325b-utilities\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.588645 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbmv9\" (UniqueName: \"kubernetes.io/projected/04b5f989-6750-4e5c-8ded-4af0bf07325b-kube-api-access-qbmv9\") pod \"community-operators-x4fxr\" (UID: \"04b5f989-6750-4e5c-8ded-4af0bf07325b\") " pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.591759 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.676209 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-utilities\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.676305 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-catalog-content\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.676342 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9n9k\" (UniqueName: \"kubernetes.io/projected/92414b60-e9e0-45ef-91ab-8ce0734f081b-kube-api-access-g9n9k\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.778364 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-utilities\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.778699 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-catalog-content\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.778721 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9n9k\" (UniqueName: \"kubernetes.io/projected/92414b60-e9e0-45ef-91ab-8ce0734f081b-kube-api-access-g9n9k\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.778854 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-utilities\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.779190 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-catalog-content\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.781791 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" 
event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerStarted","Data":"863092082e6276cb557d45a9aad37301d2630adc54b2312217db4b3b71410f6f"} Jan 20 16:36:33 crc kubenswrapper[4995]: I0120 16:36:33.798751 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9n9k\" (UniqueName: \"kubernetes.io/projected/92414b60-e9e0-45ef-91ab-8ce0734f081b-kube-api-access-g9n9k\") pod \"certified-operators-4w5vv\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") " pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.051211 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x4fxr"] Jan 20 16:36:34 crc kubenswrapper[4995]: W0120 16:36:34.058994 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04b5f989_6750_4e5c_8ded_4af0bf07325b.slice/crio-d811fad0af8904514b1e014b2698f0e4886f27f7c3d5dd36bc747cecb7477f7d WatchSource:0}: Error finding container d811fad0af8904514b1e014b2698f0e4886f27f7c3d5dd36bc747cecb7477f7d: Status 404 returned error can't find the container with id d811fad0af8904514b1e014b2698f0e4886f27f7c3d5dd36bc747cecb7477f7d Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.095358 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.362738 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4w5vv"] Jan 20 16:36:34 crc kubenswrapper[4995]: W0120 16:36:34.365569 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92414b60_e9e0_45ef_91ab_8ce0734f081b.slice/crio-30fcd77cc111fd01f44c34e76f725d8a0ede1c9396ce2b041be0e5f6689661e7 WatchSource:0}: Error finding container 30fcd77cc111fd01f44c34e76f725d8a0ede1c9396ce2b041be0e5f6689661e7: Status 404 returned error can't find the container with id 30fcd77cc111fd01f44c34e76f725d8a0ede1c9396ce2b041be0e5f6689661e7 Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.791938 4995 generic.go:334] "Generic (PLEG): container finished" podID="04b5f989-6750-4e5c-8ded-4af0bf07325b" containerID="1178b037d158b372dc24a5ed57bea4b7046a6980d2f82359ea0950dfc100ecb2" exitCode=0 Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.792363 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4fxr" event={"ID":"04b5f989-6750-4e5c-8ded-4af0bf07325b","Type":"ContainerDied","Data":"1178b037d158b372dc24a5ed57bea4b7046a6980d2f82359ea0950dfc100ecb2"} Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.792924 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4fxr" event={"ID":"04b5f989-6750-4e5c-8ded-4af0bf07325b","Type":"ContainerStarted","Data":"d811fad0af8904514b1e014b2698f0e4886f27f7c3d5dd36bc747cecb7477f7d"} Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.797434 4995 generic.go:334] "Generic (PLEG): container finished" podID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerID="65ab553209b4a826764656ac59331d1c228979e6f1f61bdd2e476e5e7342e4e4" exitCode=0 Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.797479 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4w5vv" 
event={"ID":"92414b60-e9e0-45ef-91ab-8ce0734f081b","Type":"ContainerDied","Data":"65ab553209b4a826764656ac59331d1c228979e6f1f61bdd2e476e5e7342e4e4"} Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.797499 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4w5vv" event={"ID":"92414b60-e9e0-45ef-91ab-8ce0734f081b","Type":"ContainerStarted","Data":"30fcd77cc111fd01f44c34e76f725d8a0ede1c9396ce2b041be0e5f6689661e7"} Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.809738 4995 generic.go:334] "Generic (PLEG): container finished" podID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerID="863092082e6276cb557d45a9aad37301d2630adc54b2312217db4b3b71410f6f" exitCode=0 Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.809805 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerDied","Data":"863092082e6276cb557d45a9aad37301d2630adc54b2312217db4b3b71410f6f"} Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.818025 4995 generic.go:334] "Generic (PLEG): container finished" podID="9539d617-3abb-4dd5-aa3a-f9f6dd8615bb" containerID="05f9efb7ea6d787e26acb8af98d59d9feeda388adc917d1af2c0caa3c008e5e6" exitCode=0 Jan 20 16:36:34 crc kubenswrapper[4995]: I0120 16:36:34.818114 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-62wzq" event={"ID":"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb","Type":"ContainerDied","Data":"05f9efb7ea6d787e26acb8af98d59d9feeda388adc917d1af2c0caa3c008e5e6"} Jan 20 16:36:35 crc kubenswrapper[4995]: I0120 16:36:35.824471 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4fxr" event={"ID":"04b5f989-6750-4e5c-8ded-4af0bf07325b","Type":"ContainerStarted","Data":"83c82115a834a6b8f88aa3590106372b9b9fa8a8da7be5713065fa003aa348fc"} Jan 20 16:36:35 crc kubenswrapper[4995]: I0120 16:36:35.827996 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerStarted","Data":"5bc7dc0502c7bd3541ee95af278707323b6be2b75197ab9f51a470c282ab95ac"} Jan 20 16:36:35 crc kubenswrapper[4995]: I0120 16:36:35.830452 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-62wzq" event={"ID":"9539d617-3abb-4dd5-aa3a-f9f6dd8615bb","Type":"ContainerStarted","Data":"b1dfc3906ef90763eab33d0c2c49be2adca076aa34db12c7743f6ba98c25d786"} Jan 20 16:36:35 crc kubenswrapper[4995]: I0120 16:36:35.908863 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-62wzq" podStartSLOduration=3.239882997 podStartE2EDuration="5.908843699s" podCreationTimestamp="2026-01-20 16:36:30 +0000 UTC" firstStartedPulling="2026-01-20 16:36:32.767846657 +0000 UTC m=+311.012451503" lastFinishedPulling="2026-01-20 16:36:35.436807389 +0000 UTC m=+313.681412205" observedRunningTime="2026-01-20 16:36:35.907532643 +0000 UTC m=+314.152137469" watchObservedRunningTime="2026-01-20 16:36:35.908843699 +0000 UTC m=+314.153448515" Jan 20 16:36:35 crc kubenswrapper[4995]: I0120 16:36:35.910474 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c5n9d" podStartSLOduration=2.316730335 podStartE2EDuration="4.910465305s" podCreationTimestamp="2026-01-20 16:36:31 +0000 UTC" 
firstStartedPulling="2026-01-20 16:36:32.763707653 +0000 UTC m=+311.008312499" lastFinishedPulling="2026-01-20 16:36:35.357442663 +0000 UTC m=+313.602047469" observedRunningTime="2026-01-20 16:36:35.889588959 +0000 UTC m=+314.134193755" watchObservedRunningTime="2026-01-20 16:36:35.910465305 +0000 UTC m=+314.155070131" Jan 20 16:36:36 crc kubenswrapper[4995]: I0120 16:36:36.838995 4995 generic.go:334] "Generic (PLEG): container finished" podID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerID="bc932303901d904876a571d44475672fae0db58106b30fa7eceb4848a91e7174" exitCode=0 Jan 20 16:36:36 crc kubenswrapper[4995]: I0120 16:36:36.839122 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4w5vv" event={"ID":"92414b60-e9e0-45ef-91ab-8ce0734f081b","Type":"ContainerDied","Data":"bc932303901d904876a571d44475672fae0db58106b30fa7eceb4848a91e7174"} Jan 20 16:36:36 crc kubenswrapper[4995]: I0120 16:36:36.842067 4995 generic.go:334] "Generic (PLEG): container finished" podID="04b5f989-6750-4e5c-8ded-4af0bf07325b" containerID="83c82115a834a6b8f88aa3590106372b9b9fa8a8da7be5713065fa003aa348fc" exitCode=0 Jan 20 16:36:36 crc kubenswrapper[4995]: I0120 16:36:36.842120 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4fxr" event={"ID":"04b5f989-6750-4e5c-8ded-4af0bf07325b","Type":"ContainerDied","Data":"83c82115a834a6b8f88aa3590106372b9b9fa8a8da7be5713065fa003aa348fc"} Jan 20 16:36:37 crc kubenswrapper[4995]: I0120 16:36:37.849291 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4w5vv" event={"ID":"92414b60-e9e0-45ef-91ab-8ce0734f081b","Type":"ContainerStarted","Data":"50b909571ee719e4856ddd25a2b7ef9226da80ad57513600916f0e4b6f4228dc"} Jan 20 16:36:37 crc kubenswrapper[4995]: I0120 16:36:37.852062 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x4fxr" event={"ID":"04b5f989-6750-4e5c-8ded-4af0bf07325b","Type":"ContainerStarted","Data":"77b5e88e216c55330f8fd4d79879d82eef05661f0d624c46cf9b336e7ecbe878"} Jan 20 16:36:37 crc kubenswrapper[4995]: I0120 16:36:37.865611 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4w5vv" podStartSLOduration=2.166894684 podStartE2EDuration="4.865589597s" podCreationTimestamp="2026-01-20 16:36:33 +0000 UTC" firstStartedPulling="2026-01-20 16:36:34.800268739 +0000 UTC m=+313.044873595" lastFinishedPulling="2026-01-20 16:36:37.498963702 +0000 UTC m=+315.743568508" observedRunningTime="2026-01-20 16:36:37.865544546 +0000 UTC m=+316.110149362" watchObservedRunningTime="2026-01-20 16:36:37.865589597 +0000 UTC m=+316.110194413" Jan 20 16:36:37 crc kubenswrapper[4995]: I0120 16:36:37.881934 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x4fxr" podStartSLOduration=2.374236906 podStartE2EDuration="4.881914747s" podCreationTimestamp="2026-01-20 16:36:33 +0000 UTC" firstStartedPulling="2026-01-20 16:36:34.793703559 +0000 UTC m=+313.038308405" lastFinishedPulling="2026-01-20 16:36:37.30138144 +0000 UTC m=+315.545986246" observedRunningTime="2026-01-20 16:36:37.879668925 +0000 UTC m=+316.124273751" watchObservedRunningTime="2026-01-20 16:36:37.881914747 +0000 UTC m=+316.126519553" Jan 20 16:36:38 crc kubenswrapper[4995]: I0120 16:36:38.203741 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:36:38 crc kubenswrapper[4995]: I0120 16:36:38.208011 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:36:38 crc kubenswrapper[4995]: I0120 16:36:38.856989 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:36:38 crc kubenswrapper[4995]: I0120 16:36:38.862753 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 16:36:41 crc kubenswrapper[4995]: I0120 16:36:41.198182 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:41 crc kubenswrapper[4995]: I0120 16:36:41.198730 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:41 crc kubenswrapper[4995]: I0120 16:36:41.273191 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:41 crc kubenswrapper[4995]: I0120 16:36:41.397818 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:41 crc kubenswrapper[4995]: I0120 16:36:41.397884 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:41 crc kubenswrapper[4995]: I0120 16:36:41.952313 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-62wzq" Jan 20 16:36:42 crc kubenswrapper[4995]: I0120 16:36:42.463480 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c5n9d" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="registry-server" probeResult="failure" output=< Jan 20 16:36:42 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 16:36:42 crc kubenswrapper[4995]: > Jan 20 16:36:43 crc kubenswrapper[4995]: I0120 16:36:43.592515 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:43 crc kubenswrapper[4995]: I0120 16:36:43.594059 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:43 crc kubenswrapper[4995]: I0120 16:36:43.647357 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:43 crc kubenswrapper[4995]: I0120 16:36:43.940456 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x4fxr" Jan 20 16:36:44 crc kubenswrapper[4995]: I0120 16:36:44.096176 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:44 crc kubenswrapper[4995]: I0120 16:36:44.096335 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:44 crc kubenswrapper[4995]: I0120 16:36:44.146615 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4w5vv" 
Jan 20 16:36:44 crc kubenswrapper[4995]: I0120 16:36:44.945038 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4w5vv" Jan 20 16:36:51 crc kubenswrapper[4995]: I0120 16:36:51.441684 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:36:51 crc kubenswrapper[4995]: I0120 16:36:51.478638 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.537946 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xmzwh"] Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.539278 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.550800 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xmzwh"] Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.571682 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.571757 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612264 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b8ba9422-2833-49d6-b7f8-763f5462d069-registry-certificates\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612301 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnnmc\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-kube-api-access-qnnmc\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612323 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b8ba9422-2833-49d6-b7f8-763f5462d069-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612494 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b8ba9422-2833-49d6-b7f8-763f5462d069-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: 
\"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612635 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-registry-tls\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612715 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612744 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba9422-2833-49d6-b7f8-763f5462d069-trusted-ca\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.612784 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-bound-sa-token\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.634206 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.713466 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba9422-2833-49d6-b7f8-763f5462d069-trusted-ca\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.713515 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-bound-sa-token\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.713564 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b8ba9422-2833-49d6-b7f8-763f5462d069-registry-certificates\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: 
I0120 16:37:30.713585 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnnmc\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-kube-api-access-qnnmc\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.713609 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b8ba9422-2833-49d6-b7f8-763f5462d069-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.713654 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b8ba9422-2833-49d6-b7f8-763f5462d069-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.713688 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-registry-tls\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.717134 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b8ba9422-2833-49d6-b7f8-763f5462d069-registry-certificates\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.717145 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b8ba9422-2833-49d6-b7f8-763f5462d069-trusted-ca\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.718289 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b8ba9422-2833-49d6-b7f8-763f5462d069-ca-trust-extracted\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.720828 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b8ba9422-2833-49d6-b7f8-763f5462d069-installation-pull-secrets\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.720887 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-registry-tls\") pod 
\"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.729754 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnnmc\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-kube-api-access-qnnmc\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.730714 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b8ba9422-2833-49d6-b7f8-763f5462d069-bound-sa-token\") pod \"image-registry-66df7c8f76-xmzwh\" (UID: \"b8ba9422-2833-49d6-b7f8-763f5462d069\") " pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:30 crc kubenswrapper[4995]: I0120 16:37:30.863642 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:31 crc kubenswrapper[4995]: I0120 16:37:31.124566 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-xmzwh"] Jan 20 16:37:31 crc kubenswrapper[4995]: I0120 16:37:31.185277 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" event={"ID":"b8ba9422-2833-49d6-b7f8-763f5462d069","Type":"ContainerStarted","Data":"80675b1671a6ab2e3b7e17d50b44e2cc4beaa0d179b56a63f557cc6bef2a8efd"} Jan 20 16:37:33 crc kubenswrapper[4995]: I0120 16:37:33.200834 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" event={"ID":"b8ba9422-2833-49d6-b7f8-763f5462d069","Type":"ContainerStarted","Data":"8bba79d28affaff24d2e9d573709920af8c8cfafb25e7be0377d683b268b009c"} Jan 20 16:37:33 crc kubenswrapper[4995]: I0120 16:37:33.202272 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:33 crc kubenswrapper[4995]: I0120 16:37:33.225354 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" podStartSLOduration=3.225334545 podStartE2EDuration="3.225334545s" podCreationTimestamp="2026-01-20 16:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:37:33.222903668 +0000 UTC m=+371.467508484" watchObservedRunningTime="2026-01-20 16:37:33.225334545 +0000 UTC m=+371.469939371" Jan 20 16:37:50 crc kubenswrapper[4995]: I0120 16:37:50.869504 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-xmzwh" Jan 20 16:37:50 crc kubenswrapper[4995]: I0120 16:37:50.941321 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5944b"] Jan 20 16:38:00 crc kubenswrapper[4995]: I0120 16:38:00.571848 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:38:00 crc 
kubenswrapper[4995]: I0120 16:38:00.572834 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:38:15 crc kubenswrapper[4995]: I0120 16:38:15.977842 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" podUID="8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" containerName="registry" containerID="cri-o://3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350" gracePeriod=30 Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.381136 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500436 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9t7b\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-kube-api-access-d9t7b\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500515 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-ca-trust-extracted\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500540 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-bound-sa-token\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500566 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-certificates\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500600 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-tls\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500643 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-installation-pull-secrets\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.500848 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 
16:38:16.500888 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-trusted-ca\") pod \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\" (UID: \"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf\") " Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.501992 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.502433 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.507543 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.507848 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-kube-api-access-d9t7b" (OuterVolumeSpecName: "kube-api-access-d9t7b") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "kube-api-access-d9t7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.512186 4995 generic.go:334] "Generic (PLEG): container finished" podID="8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" containerID="3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350" exitCode=0 Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.512257 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.512296 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.512232 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" event={"ID":"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf","Type":"ContainerDied","Data":"3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350"} Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.512355 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5944b" event={"ID":"8e121cf7-8b43-4ac2-8f76-9376b6e97ccf","Type":"ContainerDied","Data":"2d4e21a15a77791b35cbdd9b837551f83b2fd81efd23f054567b19dc67c26a65"} Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.512389 4995 scope.go:117] "RemoveContainer" containerID="3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.517050 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.521360 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.526227 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" (UID: "8e121cf7-8b43-4ac2-8f76-9376b6e97ccf"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.562280 4995 scope.go:117] "RemoveContainer" containerID="3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350" Jan 20 16:38:16 crc kubenswrapper[4995]: E0120 16:38:16.562765 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350\": container with ID starting with 3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350 not found: ID does not exist" containerID="3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.562805 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350"} err="failed to get container status \"3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350\": rpc error: code = NotFound desc = could not find container \"3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350\": container with ID starting with 3856e7cfd99ca572682f78fcd23869440f8c416002443040ea5349982cdb9350 not found: ID does not exist" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.601905 4995 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.601947 4995 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.601965 4995 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.601986 4995 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.602004 4995 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.602020 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.602037 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9t7b\" (UniqueName: \"kubernetes.io/projected/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf-kube-api-access-d9t7b\") on node \"crc\" DevicePath \"\"" Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.857654 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5944b"] Jan 20 16:38:16 crc kubenswrapper[4995]: I0120 16:38:16.865985 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-image-registry/image-registry-697d97f7c8-5944b"] Jan 20 16:38:18 crc kubenswrapper[4995]: I0120 16:38:18.001988 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" path="/var/lib/kubelet/pods/8e121cf7-8b43-4ac2-8f76-9376b6e97ccf/volumes" Jan 20 16:38:30 crc kubenswrapper[4995]: I0120 16:38:30.571712 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:38:30 crc kubenswrapper[4995]: I0120 16:38:30.572481 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:38:30 crc kubenswrapper[4995]: I0120 16:38:30.572550 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:38:30 crc kubenswrapper[4995]: I0120 16:38:30.573640 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bb947a8aacc0062f1c7243926da3eb2ca7a8e73c037234e2af5d64615ba3f08d"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:38:30 crc kubenswrapper[4995]: I0120 16:38:30.573972 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://bb947a8aacc0062f1c7243926da3eb2ca7a8e73c037234e2af5d64615ba3f08d" gracePeriod=600 Jan 20 16:38:31 crc kubenswrapper[4995]: I0120 16:38:31.623664 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="bb947a8aacc0062f1c7243926da3eb2ca7a8e73c037234e2af5d64615ba3f08d" exitCode=0 Jan 20 16:38:31 crc kubenswrapper[4995]: I0120 16:38:31.623729 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"bb947a8aacc0062f1c7243926da3eb2ca7a8e73c037234e2af5d64615ba3f08d"} Jan 20 16:38:31 crc kubenswrapper[4995]: I0120 16:38:31.624543 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"693a83565e92db396315c9a438801f27de4160695f8ccbcf90e6d5eab58bd11c"} Jan 20 16:38:31 crc kubenswrapper[4995]: I0120 16:38:31.624589 4995 scope.go:117] "RemoveContainer" containerID="efc250f5d1a9b629cc324335dabb8232df402d4f6271495c5141fac4c7226142" Jan 20 16:40:22 crc kubenswrapper[4995]: I0120 16:40:22.276138 4995 scope.go:117] "RemoveContainer" containerID="fb898b142ea965b32189786ce0029b743aca5ac0c25057d12f7f63ac010f6bdf" Jan 20 16:40:22 crc kubenswrapper[4995]: I0120 16:40:22.303960 4995 scope.go:117] "RemoveContainer" 
containerID="4a39e94808794f5fecd6e6c88841b51b80a41a4dca9152ac3e28eb5865f9c0fb" Jan 20 16:40:30 crc kubenswrapper[4995]: I0120 16:40:30.572034 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:40:30 crc kubenswrapper[4995]: I0120 16:40:30.572782 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:41:00 crc kubenswrapper[4995]: I0120 16:41:00.571973 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:41:00 crc kubenswrapper[4995]: I0120 16:41:00.572919 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:41:22 crc kubenswrapper[4995]: I0120 16:41:22.347415 4995 scope.go:117] "RemoveContainer" containerID="a34d9c4a769bd9623c680053b6dc85d2df654618691071aa051a41eda0d91b6d" Jan 20 16:41:22 crc kubenswrapper[4995]: I0120 16:41:22.383415 4995 scope.go:117] "RemoveContainer" containerID="86e776f3485911accb0959468f006208a76a730f203161fc2e3476195625060c" Jan 20 16:41:22 crc kubenswrapper[4995]: I0120 16:41:22.405535 4995 scope.go:117] "RemoveContainer" containerID="35644e07268412802f9700c81b7eb1507ee7a53e988751a44b07796942b58984" Jan 20 16:41:22 crc kubenswrapper[4995]: I0120 16:41:22.434669 4995 scope.go:117] "RemoveContainer" containerID="b3b8309757a99842bfc414b2173e7b0f8e7ce7642eaf2055d63686f5adb46aaa" Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.572198 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.572703 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.572805 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.574394 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"693a83565e92db396315c9a438801f27de4160695f8ccbcf90e6d5eab58bd11c"} 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.574508 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://693a83565e92db396315c9a438801f27de4160695f8ccbcf90e6d5eab58bd11c" gracePeriod=600 Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.829903 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="693a83565e92db396315c9a438801f27de4160695f8ccbcf90e6d5eab58bd11c" exitCode=0 Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.830033 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"693a83565e92db396315c9a438801f27de4160695f8ccbcf90e6d5eab58bd11c"} Jan 20 16:41:30 crc kubenswrapper[4995]: I0120 16:41:30.830747 4995 scope.go:117] "RemoveContainer" containerID="bb947a8aacc0062f1c7243926da3eb2ca7a8e73c037234e2af5d64615ba3f08d" Jan 20 16:41:31 crc kubenswrapper[4995]: I0120 16:41:31.840802 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"21a51aeb68249229f8bec50af82e0400807574c3c8c35d6878a257fbb5a8baf3"} Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.465793 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f"] Jan 20 16:42:09 crc kubenswrapper[4995]: E0120 16:42:09.466625 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" containerName="registry" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.466640 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" containerName="registry" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.466754 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e121cf7-8b43-4ac2-8f76-9376b6e97ccf" containerName="registry" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.467201 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.474996 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.475035 4995 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-d2nz9" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.475195 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.479973 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f"] Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.486834 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-s4m7v"] Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.487609 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-s4m7v" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.492535 4995 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-xrl62" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.499183 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-zngmd"] Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.499871 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.505647 4995 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-66rkr" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.513553 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-s4m7v"] Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.539239 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-zngmd"] Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.577333 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv6cc\" (UniqueName: \"kubernetes.io/projected/4b2e374d-19bf-42a0-8f00-7dea7ac84bea-kube-api-access-cv6cc\") pod \"cert-manager-cainjector-cf98fcc89-s9d5f\" (UID: \"4b2e374d-19bf-42a0-8f00-7dea7ac84bea\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.679176 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49jvb\" (UniqueName: \"kubernetes.io/projected/c4363779-0c13-4195-9d79-aa4271bfc02f-kube-api-access-49jvb\") pod \"cert-manager-858654f9db-s4m7v\" (UID: \"c4363779-0c13-4195-9d79-aa4271bfc02f\") " pod="cert-manager/cert-manager-858654f9db-s4m7v" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.679279 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv6cc\" (UniqueName: \"kubernetes.io/projected/4b2e374d-19bf-42a0-8f00-7dea7ac84bea-kube-api-access-cv6cc\") pod \"cert-manager-cainjector-cf98fcc89-s9d5f\" (UID: \"4b2e374d-19bf-42a0-8f00-7dea7ac84bea\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" Jan 20 16:42:09 crc 
kubenswrapper[4995]: I0120 16:42:09.679312 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcz29\" (UniqueName: \"kubernetes.io/projected/ac9c3170-cb6c-4320-ad74-57b76462b730-kube-api-access-hcz29\") pod \"cert-manager-webhook-687f57d79b-zngmd\" (UID: \"ac9c3170-cb6c-4320-ad74-57b76462b730\") " pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.703280 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv6cc\" (UniqueName: \"kubernetes.io/projected/4b2e374d-19bf-42a0-8f00-7dea7ac84bea-kube-api-access-cv6cc\") pod \"cert-manager-cainjector-cf98fcc89-s9d5f\" (UID: \"4b2e374d-19bf-42a0-8f00-7dea7ac84bea\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.780407 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcz29\" (UniqueName: \"kubernetes.io/projected/ac9c3170-cb6c-4320-ad74-57b76462b730-kube-api-access-hcz29\") pod \"cert-manager-webhook-687f57d79b-zngmd\" (UID: \"ac9c3170-cb6c-4320-ad74-57b76462b730\") " pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.780552 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49jvb\" (UniqueName: \"kubernetes.io/projected/c4363779-0c13-4195-9d79-aa4271bfc02f-kube-api-access-49jvb\") pod \"cert-manager-858654f9db-s4m7v\" (UID: \"c4363779-0c13-4195-9d79-aa4271bfc02f\") " pod="cert-manager/cert-manager-858654f9db-s4m7v" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.799317 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.799768 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49jvb\" (UniqueName: \"kubernetes.io/projected/c4363779-0c13-4195-9d79-aa4271bfc02f-kube-api-access-49jvb\") pod \"cert-manager-858654f9db-s4m7v\" (UID: \"c4363779-0c13-4195-9d79-aa4271bfc02f\") " pod="cert-manager/cert-manager-858654f9db-s4m7v" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.811569 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcz29\" (UniqueName: \"kubernetes.io/projected/ac9c3170-cb6c-4320-ad74-57b76462b730-kube-api-access-hcz29\") pod \"cert-manager-webhook-687f57d79b-zngmd\" (UID: \"ac9c3170-cb6c-4320-ad74-57b76462b730\") " pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.811946 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-s4m7v" Jan 20 16:42:09 crc kubenswrapper[4995]: I0120 16:42:09.822532 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" Jan 20 16:42:10 crc kubenswrapper[4995]: I0120 16:42:10.065432 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f"] Jan 20 16:42:10 crc kubenswrapper[4995]: I0120 16:42:10.073024 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 16:42:10 crc kubenswrapper[4995]: I0120 16:42:10.086034 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" event={"ID":"4b2e374d-19bf-42a0-8f00-7dea7ac84bea","Type":"ContainerStarted","Data":"dca8fb6595112f95677936ef03b48af6347384a94c4d026d9f70020a95fa76c6"} Jan 20 16:42:10 crc kubenswrapper[4995]: I0120 16:42:10.329337 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-s4m7v"] Jan 20 16:42:10 crc kubenswrapper[4995]: I0120 16:42:10.329817 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-zngmd"] Jan 20 16:42:11 crc kubenswrapper[4995]: I0120 16:42:11.101407 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" event={"ID":"ac9c3170-cb6c-4320-ad74-57b76462b730","Type":"ContainerStarted","Data":"e1d2d32253e1ad4e7fdafc940dbec68bdf45304bc641ac9ddb1d6e57a7116c41"} Jan 20 16:42:11 crc kubenswrapper[4995]: I0120 16:42:11.103536 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-s4m7v" event={"ID":"c4363779-0c13-4195-9d79-aa4271bfc02f","Type":"ContainerStarted","Data":"21847bc875d9b5bcc16c944dcdb5cce6ff06497500d3b42aa496b18e3799ebf7"} Jan 20 16:42:14 crc kubenswrapper[4995]: I0120 16:42:14.127689 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-s4m7v" event={"ID":"c4363779-0c13-4195-9d79-aa4271bfc02f","Type":"ContainerStarted","Data":"85bebe431a9b7a9283e559dbfef74bc674953e3f7fd1f1dd3cec653256b4048c"} Jan 20 16:42:14 crc kubenswrapper[4995]: I0120 16:42:14.129780 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" event={"ID":"4b2e374d-19bf-42a0-8f00-7dea7ac84bea","Type":"ContainerStarted","Data":"5cec19c1f9bc8709d9e392cf526c0b344cde00b0f22ac56358c5733d4467d7c7"} Jan 20 16:42:14 crc kubenswrapper[4995]: I0120 16:42:14.130948 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" event={"ID":"ac9c3170-cb6c-4320-ad74-57b76462b730","Type":"ContainerStarted","Data":"6f8a99a284658e40ed27fa1b1129f483fa57231f075efd7ce44c94438c7531f3"} Jan 20 16:42:14 crc kubenswrapper[4995]: I0120 16:42:14.131065 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" Jan 20 16:42:14 crc kubenswrapper[4995]: I0120 16:42:14.143944 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-s4m7v" podStartSLOduration=1.704636536 podStartE2EDuration="5.143913577s" podCreationTimestamp="2026-01-20 16:42:09 +0000 UTC" firstStartedPulling="2026-01-20 16:42:10.333228117 +0000 UTC m=+648.577832923" lastFinishedPulling="2026-01-20 16:42:13.772505158 +0000 UTC m=+652.017109964" observedRunningTime="2026-01-20 16:42:14.142272092 +0000 UTC m=+652.386876908" watchObservedRunningTime="2026-01-20 16:42:14.143913577 +0000 UTC m=+652.388518383" Jan 20 16:42:14 crc 
kubenswrapper[4995]: I0120 16:42:14.166613 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd" podStartSLOduration=1.699565345 podStartE2EDuration="5.166598149s" podCreationTimestamp="2026-01-20 16:42:09 +0000 UTC" firstStartedPulling="2026-01-20 16:42:10.33370829 +0000 UTC m=+648.578313146" lastFinishedPulling="2026-01-20 16:42:13.800741144 +0000 UTC m=+652.045345950" observedRunningTime="2026-01-20 16:42:14.164246676 +0000 UTC m=+652.408851482" watchObservedRunningTime="2026-01-20 16:42:14.166598149 +0000 UTC m=+652.411202955" Jan 20 16:42:14 crc kubenswrapper[4995]: I0120 16:42:14.187450 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-s9d5f" podStartSLOduration=1.472338935 podStartE2EDuration="5.187429682s" podCreationTimestamp="2026-01-20 16:42:09 +0000 UTC" firstStartedPulling="2026-01-20 16:42:10.072700951 +0000 UTC m=+648.317305767" lastFinishedPulling="2026-01-20 16:42:13.787791708 +0000 UTC m=+652.032396514" observedRunningTime="2026-01-20 16:42:14.18264354 +0000 UTC m=+652.427248346" watchObservedRunningTime="2026-01-20 16:42:14.187429682 +0000 UTC m=+652.432034488" Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.774305 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qp9h9"] Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.778792 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-controller" containerID="cri-o://9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.779129 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="sbdb" containerID="cri-o://ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.778832 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="northd" containerID="cri-o://06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.778863 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="nbdb" containerID="cri-o://7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.778949 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-acl-logging" containerID="cri-o://6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.778946 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-ovn-metrics" 
containerID="cri-o://f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.778886 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-node" containerID="cri-o://3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860" gracePeriod=30 Jan 20 16:42:18 crc kubenswrapper[4995]: I0120 16:42:18.819991 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" containerID="cri-o://8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" gracePeriod=30 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.125177 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/3.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.127271 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovn-acl-logging/0.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.127789 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovn-controller/0.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.128181 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182115 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-t72rn"] Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182314 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="nbdb" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182325 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="nbdb" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182337 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-node" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182344 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-node" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182352 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182357 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182366 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182372 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182380 
4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182385 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182393 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-ovn-metrics" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182399 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-ovn-metrics" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182409 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182415 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182429 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="northd" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182434 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="northd" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182443 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kubecfg-setup" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182449 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kubecfg-setup" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182457 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="sbdb" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182464 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="sbdb" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182471 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-acl-logging" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182477 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-acl-logging" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182570 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="nbdb" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182579 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182587 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182593 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 
16:42:19.182600 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-node" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182608 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="northd" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182614 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="kube-rbac-proxy-ovn-metrics" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182622 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182632 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182638 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovn-acl-logging" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182645 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="sbdb" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182726 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182733 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.182740 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182745 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.182844 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" containerName="ovnkube-controller" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.184376 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.186804 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovnkube-controller/3.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.189436 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovn-acl-logging/0.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.189900 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qp9h9_e82420c5-a3ae-43ea-a208-b757794521a6/ovn-controller/0.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190386 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" exitCode=0 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190411 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" exitCode=0 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190421 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979" exitCode=0 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190430 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5" exitCode=0 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190437 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737" exitCode=0 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190443 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190446 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190478 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190494 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190505 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190514 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190524 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190445 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860" exitCode=0 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190551 4995 scope.go:117] "RemoveContainer" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190567 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc" exitCode=143 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190591 4995 generic.go:334] "Generic (PLEG): container finished" podID="e82420c5-a3ae-43ea-a208-b757794521a6" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608" exitCode=143 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190536 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190641 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190656 4995 pod_container_deletor.go:114] "Failed to issue the request 
to remove container" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190664 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190671 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190677 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190684 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190691 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190697 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190721 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190744 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190751 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190758 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190764 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190772 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190781 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190789 4995 pod_container_deletor.go:114] "Failed to issue the request 
to remove container" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190796 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190803 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190810 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190820 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190831 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190840 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190846 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190852 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190858 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190864 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190870 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190876 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190883 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190890 4995 pod_container_deletor.go:114] "Failed to issue the request 
to remove container" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190899 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qp9h9" event={"ID":"e82420c5-a3ae-43ea-a208-b757794521a6","Type":"ContainerDied","Data":"57f34d52cd1b4b5eb0d600f5f8894020ff7ca24e58b5bd1a9b2be2388078c844"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190914 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190923 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190930 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190937 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190946 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190954 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190962 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190968 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190976 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.190983 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.192181 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/2.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.192581 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/1.log" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.192622 4995 generic.go:334] "Generic (PLEG): container finished" podID="5008a882-4540-4ebe-8a27-53f0de0cbd4a" 
containerID="ae12658acf3b63bc36cb1271992b7137508cfafa7404490f7b7e5544d8dd1545" exitCode=2 Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.192643 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerDied","Data":"ae12658acf3b63bc36cb1271992b7137508cfafa7404490f7b7e5544d8dd1545"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.192656 4995 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842"} Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.192906 4995 scope.go:117] "RemoveContainer" containerID="ae12658acf3b63bc36cb1271992b7137508cfafa7404490f7b7e5544d8dd1545" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.193062 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-vlvwg_openshift-multus(5008a882-4540-4ebe-8a27-53f0de0cbd4a)\"" pod="openshift-multus/multus-vlvwg" podUID="5008a882-4540-4ebe-8a27-53f0de0cbd4a" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.209880 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.226994 4995 scope.go:117] "RemoveContainer" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.260567 4995 scope.go:117] "RemoveContainer" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.279961 4995 scope.go:117] "RemoveContainer" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.290843 4995 scope.go:117] "RemoveContainer" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.300774 4995 scope.go:117] "RemoveContainer" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310785 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-slash\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310815 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-ovn-kubernetes\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310847 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-systemd-units\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310868 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-script-lib\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310885 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-systemd\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310900 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-etc-openvswitch\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310924 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-node-log\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310937 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-kubelet\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310970 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-env-overrides\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310882 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-slash" (OuterVolumeSpecName: "host-slash") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311018 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310995 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310900 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310940 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.310983 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311063 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-bin\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311117 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-log-socket\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311129 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311160 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-var-lib-openvswitch\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311168 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-node-log" (OuterVolumeSpecName: "node-log") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311196 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-log-socket" (OuterVolumeSpecName: "log-socket") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311202 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-openvswitch\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311221 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311222 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-netns\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311247 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311263 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-config\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311277 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311301 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e82420c5-a3ae-43ea-a208-b757794521a6-ovn-node-metrics-cert\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311345 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-netd\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311375 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptmxc\" (UniqueName: \"kubernetes.io/projected/e82420c5-a3ae-43ea-a208-b757794521a6-kube-api-access-ptmxc\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311400 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-ovn\") pod \"e82420c5-a3ae-43ea-a208-b757794521a6\" (UID: \"e82420c5-a3ae-43ea-a208-b757794521a6\") " Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311305 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311496 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311554 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311559 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-log-socket\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311584 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311590 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311602 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-cni-netd\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311657 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-etc-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311679 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-cni-bin\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311694 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311705 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-env-overrides\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311746 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-ovn\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311768 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9794z\" (UniqueName: \"kubernetes.io/projected/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-kube-api-access-9794z\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311793 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-systemd\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311817 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-slash\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311841 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.311998 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-run-netns\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312040 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312071 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovn-node-metrics-cert\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312108 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovnkube-config\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312126 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-node-log\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312139 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-run-ovn-kubernetes\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312153 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovnkube-script-lib\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312170 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-kubelet\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312207 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-var-lib-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312286 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-systemd-units\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312406 4995 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312422 4995 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312432 4995 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312442 4995 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-node-log\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312450 4995 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312462 4995 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312476 4995 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312488 4995 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312499 4995 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-log-socket\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312509 4995 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312517 4995 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312525 4995 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312532 4995 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/e82420c5-a3ae-43ea-a208-b757794521a6-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312541 4995 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312549 4995 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312556 4995 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-slash\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.312565 4995 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.316242 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e82420c5-a3ae-43ea-a208-b757794521a6-kube-api-access-ptmxc" (OuterVolumeSpecName: "kube-api-access-ptmxc") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "kube-api-access-ptmxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.316472 4995 scope.go:117] "RemoveContainer" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.316576 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e82420c5-a3ae-43ea-a208-b757794521a6-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.324087 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "e82420c5-a3ae-43ea-a208-b757794521a6" (UID: "e82420c5-a3ae-43ea-a208-b757794521a6"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.329449 4995 scope.go:117] "RemoveContainer" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.344606 4995 scope.go:117] "RemoveContainer" containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.357920 4995 scope.go:117] "RemoveContainer" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.358242 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": container with ID starting with 8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c not found: ID does not exist" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.358276 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} err="failed to get container status \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": rpc error: code = NotFound desc = could not find container \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": container with ID starting with 8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.358295 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.358613 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": container with ID starting with 20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154 not found: ID does not exist" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.358654 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} err="failed to get container status \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": rpc error: code = NotFound desc = could not find container \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": container with ID starting with 20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.358686 4995 scope.go:117] "RemoveContainer" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.358944 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": container with ID starting with ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c not found: ID does not exist" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" Jan 20 16:42:19 crc 
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.358992 4995 scope.go:117] "RemoveContainer" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.359177 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": container with ID starting with 7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979 not found: ID does not exist" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.359202 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} err="failed to get container status \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": rpc error: code = NotFound desc = could not find container \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": container with ID starting with 7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979 not found: ID does not exist"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.359214 4995 scope.go:117] "RemoveContainer" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.359464 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": container with ID starting with 06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5 not found: ID does not exist" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.359491 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} err="failed to get container status \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": rpc error: code = NotFound desc = could not find container \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": container with ID starting with 06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5 not found: ID does not exist"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.359506 4995 scope.go:117] "RemoveContainer" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.359740 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": container with ID starting with f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737 not found: ID does not exist" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.359764 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} err="failed to get container status \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": rpc error: code = NotFound desc = could not find container \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": container with ID starting with f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737 not found: ID does not exist"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.359775 4995 scope.go:117] "RemoveContainer" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.360025 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": container with ID starting with 3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860 not found: ID does not exist" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360047 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} err="failed to get container status \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": rpc error: code = NotFound desc = could not find container \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": container with ID starting with 3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860 not found: ID does not exist"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360057 4995 scope.go:117] "RemoveContainer" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.360299 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": container with ID starting with 6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc not found: ID does not exist" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360321 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} err="failed to get container status \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": rpc error: code = NotFound desc = could not find container \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": container with ID starting with 6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc not found: ID does not exist"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360332 4995 scope.go:117] "RemoveContainer" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.360558 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": container with ID starting with 9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608 not found: ID does not exist" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360577 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} err="failed to get container status \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": rpc error: code = NotFound desc = could not find container \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": container with ID starting with 9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608 not found: ID does not exist"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360593 4995 scope.go:117] "RemoveContainer" containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"
Jan 20 16:42:19 crc kubenswrapper[4995]: E0120 16:42:19.360788 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": container with ID starting with 408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8 not found: ID does not exist" containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.360818 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} err="failed to get container status \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": rpc error: code = NotFound desc = could not find container \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": container with ID starting with 408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8 not found: ID does not exist"
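The E-level NotFound errors above are logged but not fatal: container removal is idempotent, so a container the runtime no longer knows about is treated as already deleted. A minimal Go sketch of that behavior follows; the runtime interface is an illustrative stand-in for the CRI client, not the real API surface.

// Hedged sketch: tolerate gRPC NotFound when cleaning up a container that is
// already gone, logging in the same shape as the entries above.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type runtime interface {
	ContainerStatus(id string) error // stand-in: only the error matters here
	RemoveContainer(id string) error
}

func removeContainer(r runtime, id string) error {
	if err := r.ContainerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			// Same outcome as the log: report the error, then move on.
			fmt.Printf("DeleteContainer returned error containerID=%q err=%v\n", id, err)
			return nil
		}
		return fmt.Errorf("failed to get container status %q: %w", id, err)
	}
	return r.RemoveContainer(id)
}

type goneRuntime struct{}

func (goneRuntime) ContainerStatus(id string) error {
	return status.Error(codes.NotFound, "could not find container "+id)
}
func (goneRuntime) RemoveContainer(id string) error { return nil }

func main() {
	_ = removeContainer(goneRuntime{}, "8f290cd1") // prints the NotFound line
}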
\"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": container with ID starting with 20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.361387 4995 scope.go:117] "RemoveContainer" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.361584 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} err="failed to get container status \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": rpc error: code = NotFound desc = could not find container \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": container with ID starting with ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.361601 4995 scope.go:117] "RemoveContainer" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.361892 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} err="failed to get container status \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": rpc error: code = NotFound desc = could not find container \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": container with ID starting with 7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.361915 4995 scope.go:117] "RemoveContainer" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.362250 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} err="failed to get container status \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": rpc error: code = NotFound desc = could not find container \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": container with ID starting with 06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.362269 4995 scope.go:117] "RemoveContainer" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.362573 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} err="failed to get container status \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": rpc error: code = NotFound desc = could not find container \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": container with ID starting with f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.362604 4995 scope.go:117] "RemoveContainer" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.362813 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} err="failed to get container status \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": rpc error: code = NotFound desc = could not find container \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": container with ID starting with 3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.362891 4995 scope.go:117] "RemoveContainer" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363341 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} err="failed to get container status \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": rpc error: code = NotFound desc = could not find container \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": container with ID starting with 6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363365 4995 scope.go:117] "RemoveContainer" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363637 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} err="failed to get container status \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": rpc error: code = NotFound desc = could not find container \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": container with ID starting with 9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363655 4995 scope.go:117] "RemoveContainer" containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363827 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} err="failed to get container status \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": rpc error: code = NotFound desc = could not find container \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": container with ID starting with 408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363848 4995 scope.go:117] "RemoveContainer" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.363988 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} err="failed to get container status \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": rpc error: code = NotFound desc = could not find container \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": container with ID starting with 
8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364010 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364247 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} err="failed to get container status \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": rpc error: code = NotFound desc = could not find container \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": container with ID starting with 20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364265 4995 scope.go:117] "RemoveContainer" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364467 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} err="failed to get container status \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": rpc error: code = NotFound desc = could not find container \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": container with ID starting with ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364487 4995 scope.go:117] "RemoveContainer" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364673 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} err="failed to get container status \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": rpc error: code = NotFound desc = could not find container \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": container with ID starting with 7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364693 4995 scope.go:117] "RemoveContainer" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364887 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} err="failed to get container status \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": rpc error: code = NotFound desc = could not find container \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": container with ID starting with 06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.364905 4995 scope.go:117] "RemoveContainer" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365134 4995 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} err="failed to get container status \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": rpc error: code = NotFound desc = could not find container \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": container with ID starting with f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365151 4995 scope.go:117] "RemoveContainer" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365308 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} err="failed to get container status \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": rpc error: code = NotFound desc = could not find container \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": container with ID starting with 3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365325 4995 scope.go:117] "RemoveContainer" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365547 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} err="failed to get container status \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": rpc error: code = NotFound desc = could not find container \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": container with ID starting with 6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365567 4995 scope.go:117] "RemoveContainer" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365877 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} err="failed to get container status \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": rpc error: code = NotFound desc = could not find container \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": container with ID starting with 9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.365896 4995 scope.go:117] "RemoveContainer" containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366156 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} err="failed to get container status \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": rpc error: code = NotFound desc = could not find container \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": container with ID starting with 408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8 not found: ID does not exist" Jan 
20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366169 4995 scope.go:117] "RemoveContainer" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366366 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} err="failed to get container status \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": rpc error: code = NotFound desc = could not find container \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": container with ID starting with 8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366381 4995 scope.go:117] "RemoveContainer" containerID="20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366558 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154"} err="failed to get container status \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": rpc error: code = NotFound desc = could not find container \"20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154\": container with ID starting with 20fc442f37a6005d644265c90b0d9d45dda8f035d484db93d84195112f257154 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366570 4995 scope.go:117] "RemoveContainer" containerID="ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366738 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c"} err="failed to get container status \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": rpc error: code = NotFound desc = could not find container \"ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c\": container with ID starting with ba0bf6535ffa4d01aebb0cdf9481f79fe571d93556b272acb0ee914a346e8a6c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366750 4995 scope.go:117] "RemoveContainer" containerID="7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366918 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979"} err="failed to get container status \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": rpc error: code = NotFound desc = could not find container \"7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979\": container with ID starting with 7c5298e600dd389f3287a855debf4bb3768ec7808e475adb263201a3a3b4e979 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.366934 4995 scope.go:117] "RemoveContainer" containerID="06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367181 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5"} err="failed to get container status 
\"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": rpc error: code = NotFound desc = could not find container \"06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5\": container with ID starting with 06248cad84b84302151277ced015e6d4c0752f8467fe62d60a87e28047d9add5 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367195 4995 scope.go:117] "RemoveContainer" containerID="f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367441 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737"} err="failed to get container status \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": rpc error: code = NotFound desc = could not find container \"f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737\": container with ID starting with f93160cfa9de9f730270babd2fbce86b3ca1702504d386d5e8eb4fb10c68c737 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367460 4995 scope.go:117] "RemoveContainer" containerID="3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367647 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860"} err="failed to get container status \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": rpc error: code = NotFound desc = could not find container \"3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860\": container with ID starting with 3c10fdac63a6fea2ce930882861857cc439dc7fd722808a1bc3e42ab594cb860 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367668 4995 scope.go:117] "RemoveContainer" containerID="6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367895 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc"} err="failed to get container status \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": rpc error: code = NotFound desc = could not find container \"6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc\": container with ID starting with 6a148bdd4a970ac97d62d903d1d04a421e3ae1e8fa24c7211bcb97daef1570bc not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.367913 4995 scope.go:117] "RemoveContainer" containerID="9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.368146 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608"} err="failed to get container status \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": rpc error: code = NotFound desc = could not find container \"9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608\": container with ID starting with 9375a103ff5667431e390613a85a51a9fccc5a555818f5283025bd52359a8608 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.368164 4995 scope.go:117] "RemoveContainer" 
containerID="408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.368375 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8"} err="failed to get container status \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": rpc error: code = NotFound desc = could not find container \"408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8\": container with ID starting with 408d3aa23938bb219e3604a622fa4c7d7243734032aea6950a05377a128714f8 not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.368394 4995 scope.go:117] "RemoveContainer" containerID="8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.368552 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c"} err="failed to get container status \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": rpc error: code = NotFound desc = could not find container \"8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c\": container with ID starting with 8f290cd108e9b0df614b6deb3ce6088f4741cfda347d072d902eee0fd582c44c not found: ID does not exist" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.412971 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-kubelet\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413010 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-var-lib-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413041 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-systemd-units\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413053 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-kubelet\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413068 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-log-socket\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413100 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-var-lib-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413112 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-cni-netd\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413126 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-log-socket\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413110 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-systemd-units\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413145 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-cni-netd\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413155 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-cni-bin\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413216 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-etc-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413238 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-etc-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413241 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-env-overrides\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413278 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9794z\" (UniqueName: \"kubernetes.io/projected/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-kube-api-access-9794z\") pod \"ovnkube-node-t72rn\" (UID: 
\"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413301 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-ovn\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413321 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-systemd\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413342 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-slash\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413182 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-cni-bin\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413364 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413384 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-slash\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413395 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-run-netns\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413398 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-systemd\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413417 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc 
kubenswrapper[4995]: I0120 16:42:19.413428 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-run-netns\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413407 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413440 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-ovn\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413447 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovn-node-metrics-cert\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413452 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-run-openvswitch\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413480 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovnkube-config\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413531 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-node-log\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413557 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-run-ovn-kubernetes\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413577 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovnkube-script-lib\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413646 4995 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptmxc\" (UniqueName: \"kubernetes.io/projected/e82420c5-a3ae-43ea-a208-b757794521a6-kube-api-access-ptmxc\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413705 4995 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/e82420c5-a3ae-43ea-a208-b757794521a6-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413719 4995 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/e82420c5-a3ae-43ea-a208-b757794521a6-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413854 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-node-log\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413894 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-env-overrides\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413953 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-host-run-ovn-kubernetes\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.413967 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovnkube-config\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.414668 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovnkube-script-lib\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.418249 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-ovn-node-metrics-cert\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.432494 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9794z\" (UniqueName: \"kubernetes.io/projected/36ac8889-6bd6-40c9-8188-2bf7fa69c9d9-kube-api-access-9794z\") pod \"ovnkube-node-t72rn\" (UID: \"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9\") " pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.501644 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.557567 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qp9h9"]
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.565340 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qp9h9"]
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.825699 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-zngmd"
Jan 20 16:42:19 crc kubenswrapper[4995]: I0120 16:42:19.994778 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e82420c5-a3ae-43ea-a208-b757794521a6" path="/var/lib/kubelet/pods/e82420c5-a3ae-43ea-a208-b757794521a6/volumes"
Jan 20 16:42:20 crc kubenswrapper[4995]: I0120 16:42:20.200016 4995 generic.go:334] "Generic (PLEG): container finished" podID="36ac8889-6bd6-40c9-8188-2bf7fa69c9d9" containerID="b8f0152d2b9822f5eb8f772b73b3c16b27591a49e7505ea23acc12f20fd3839b" exitCode=0
Jan 20 16:42:20 crc kubenswrapper[4995]: I0120 16:42:20.200095 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerDied","Data":"b8f0152d2b9822f5eb8f772b73b3c16b27591a49e7505ea23acc12f20fd3839b"}
Jan 20 16:42:20 crc kubenswrapper[4995]: I0120 16:42:20.200402 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"a19151c4d068f6f939201a8404e125635072f8625f22073df4d3dd99a4d5b9f2"}
Jan 20 16:42:21 crc kubenswrapper[4995]: I0120 16:42:21.212179 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"e43612897831482409a77b608f9f64401ee292f5f1c60958d857f132375c043f"}
Jan 20 16:42:21 crc kubenswrapper[4995]: I0120 16:42:21.212394 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"8f2190d7cb7cc8cc107dbd19f4f62de4c7e38980c8477e992578ba1ccb02b906"}
Jan 20 16:42:21 crc kubenswrapper[4995]: I0120 16:42:21.212406 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"0518fe511d51b1f1f7836a200d3edf6d1294d241119f7e0306deabfa9505494a"}
Jan 20 16:42:21 crc kubenswrapper[4995]: I0120 16:42:21.212416 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"d1908ba32f593630de4a94769a0581915682736c0eacf6967123d3bdf63369ab"}
Jan 20 16:42:21 crc kubenswrapper[4995]: I0120 16:42:21.212424 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"c863b0b561c9c37d389424639ffdf19a4bd19ecba5f74378cf6a159acc0ec14f"}
Jan 20 16:42:21 crc kubenswrapper[4995]: I0120 16:42:21.212434 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"e544854b1244a54f864620d2343908693d904e05996be0b285acadb42ddb37f4"}
Jan 20 16:42:22 crc kubenswrapper[4995]: I0120 16:42:22.488987 4995 scope.go:117] "RemoveContainer" containerID="1ed2e2151edff4a6bfa2e0a59376032535d5f8361ceefa4c267ed60eb0365842"
Jan 20 16:42:23 crc kubenswrapper[4995]: I0120 16:42:23.229805 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/2.log"
Jan 20 16:42:24 crc kubenswrapper[4995]: I0120 16:42:24.242672 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"c7946c2f91a7c343b8c98adad8697908238bc7f13a03aabe773f9cf37033774b"}
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.258893 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" event={"ID":"36ac8889-6bd6-40c9-8188-2bf7fa69c9d9","Type":"ContainerStarted","Data":"77815fa4ee7aaeb0b1fe2fcf3655826f4a933bf5391e263470058239c0661bd0"}
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.259456 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn"
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.259473 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn"
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.259484 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn"
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.284606 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn"
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.285068 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn"
Jan 20 16:42:26 crc kubenswrapper[4995]: I0120 16:42:26.291634 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" podStartSLOduration=7.291613546 podStartE2EDuration="7.291613546s" podCreationTimestamp="2026-01-20 16:42:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:42:26.287332897 +0000 UTC m=+664.531937703" watchObservedRunningTime="2026-01-20 16:42:26.291613546 +0000 UTC m=+664.536218352"
Jan 20 16:42:32 crc kubenswrapper[4995]: I0120 16:42:32.990340 4995 scope.go:117] "RemoveContainer" containerID="ae12658acf3b63bc36cb1271992b7137508cfafa7404490f7b7e5544d8dd1545"
Jan 20 16:42:32 crc kubenswrapper[4995]: E0120 16:42:32.991257 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-vlvwg_openshift-multus(5008a882-4540-4ebe-8a27-53f0de0cbd4a)\"" pod="openshift-multus/multus-vlvwg" podUID="5008a882-4540-4ebe-8a27-53f0de0cbd4a"
Jan 20 16:42:46 crc kubenswrapper[4995]: I0120 16:42:46.991189 4995 scope.go:117] "RemoveContainer" containerID="ae12658acf3b63bc36cb1271992b7137508cfafa7404490f7b7e5544d8dd1545"
Jan 20 16:42:47 crc kubenswrapper[4995]: I0120 16:42:47.390332 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/2.log"
path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/2.log" Jan 20 16:42:47 crc kubenswrapper[4995]: I0120 16:42:47.390903 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-vlvwg" event={"ID":"5008a882-4540-4ebe-8a27-53f0de0cbd4a","Type":"ContainerStarted","Data":"66638b87f6bea2800fead20b863339ffc74c7e9d47774fef211a0b34180731fa"} Jan 20 16:42:49 crc kubenswrapper[4995]: I0120 16:42:49.528409 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-t72rn" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.368402 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc"] Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.369916 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.375601 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.378957 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc"] Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.397732 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gv6p9\" (UniqueName: \"kubernetes.io/projected/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-kube-api-access-gv6p9\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.397851 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.397895 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.499326 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gv6p9\" (UniqueName: \"kubernetes.io/projected/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-kube-api-access-gv6p9\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.499626 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.499653 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.500059 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.500165 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.520742 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gv6p9\" (UniqueName: \"kubernetes.io/projected/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-kube-api-access-gv6p9\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:52 crc kubenswrapper[4995]: I0120 16:42:52.694276 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:53 crc kubenswrapper[4995]: I0120 16:42:53.184477 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc"] Jan 20 16:42:53 crc kubenswrapper[4995]: I0120 16:42:53.431824 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" event={"ID":"bb24643a-1c98-49d5-a82c-53b3f9fb88f6","Type":"ContainerStarted","Data":"c7559ad8283fe1a836617eb2aa248f9be053b91e1bd4f7d7cf28bd583b387bff"} Jan 20 16:42:53 crc kubenswrapper[4995]: I0120 16:42:53.432288 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" event={"ID":"bb24643a-1c98-49d5-a82c-53b3f9fb88f6","Type":"ContainerStarted","Data":"8261a478b8bc2471acd57c4cac150522fdbf832f388e3654d06e55ef4ac40730"} Jan 20 16:42:54 crc kubenswrapper[4995]: I0120 16:42:54.444567 4995 generic.go:334] "Generic (PLEG): container finished" podID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerID="c7559ad8283fe1a836617eb2aa248f9be053b91e1bd4f7d7cf28bd583b387bff" exitCode=0 Jan 20 16:42:54 crc kubenswrapper[4995]: I0120 16:42:54.444631 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" event={"ID":"bb24643a-1c98-49d5-a82c-53b3f9fb88f6","Type":"ContainerDied","Data":"c7559ad8283fe1a836617eb2aa248f9be053b91e1bd4f7d7cf28bd583b387bff"} Jan 20 16:42:56 crc kubenswrapper[4995]: I0120 16:42:56.459915 4995 generic.go:334] "Generic (PLEG): container finished" podID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerID="53e8e83bf64201ca73097b5ea87626ff09f2e5f0dc9458c79ec2681087c65b46" exitCode=0 Jan 20 16:42:56 crc kubenswrapper[4995]: I0120 16:42:56.459996 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" event={"ID":"bb24643a-1c98-49d5-a82c-53b3f9fb88f6","Type":"ContainerDied","Data":"53e8e83bf64201ca73097b5ea87626ff09f2e5f0dc9458c79ec2681087c65b46"} Jan 20 16:42:57 crc kubenswrapper[4995]: I0120 16:42:57.468901 4995 generic.go:334] "Generic (PLEG): container finished" podID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerID="68f2b993e07a448d935d9f874fed6f329f2d4eb6966aee83e42056ddaef7ce1f" exitCode=0 Jan 20 16:42:57 crc kubenswrapper[4995]: I0120 16:42:57.468957 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" event={"ID":"bb24643a-1c98-49d5-a82c-53b3f9fb88f6","Type":"ContainerDied","Data":"68f2b993e07a448d935d9f874fed6f329f2d4eb6966aee83e42056ddaef7ce1f"} Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.775895 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.881436 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-bundle\") pod \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.881958 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-util\") pod \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.882035 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gv6p9\" (UniqueName: \"kubernetes.io/projected/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-kube-api-access-gv6p9\") pod \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\" (UID: \"bb24643a-1c98-49d5-a82c-53b3f9fb88f6\") " Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.884925 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-bundle" (OuterVolumeSpecName: "bundle") pod "bb24643a-1c98-49d5-a82c-53b3f9fb88f6" (UID: "bb24643a-1c98-49d5-a82c-53b3f9fb88f6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.890750 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-kube-api-access-gv6p9" (OuterVolumeSpecName: "kube-api-access-gv6p9") pod "bb24643a-1c98-49d5-a82c-53b3f9fb88f6" (UID: "bb24643a-1c98-49d5-a82c-53b3f9fb88f6"). InnerVolumeSpecName "kube-api-access-gv6p9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.921438 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-util" (OuterVolumeSpecName: "util") pod "bb24643a-1c98-49d5-a82c-53b3f9fb88f6" (UID: "bb24643a-1c98-49d5-a82c-53b3f9fb88f6"). InnerVolumeSpecName "util". 
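The "back-off 20s restarting failed container" error for kube-multus above is the kubelet's crash-loop backoff in action. As a rough sketch of the documented default policy (a 10s initial delay, doubled after each restart, capped at 5m; this reproduces the schedule seen here, it is not the kubelet's actual code):

    package main

    import (
    	"fmt"
    	"time"
    )

    // crashLoopDelay reproduces the kubelet's documented crash-loop backoff
    // schedule: 10s initial delay, doubled after each restart, capped at 5m.
    // Illustrative sketch only, not the kubelet's implementation.
    func crashLoopDelay(restarts int) time.Duration {
    	const (
    		base = 10 * time.Second
    		cap  = 5 * time.Minute
    	)
    	d := base
    	for i := 1; i < restarts; i++ {
    		d *= 2
    		if d >= cap {
    			return cap
    		}
    	}
    	return d
    }

    func main() {
    	for r := 1; r <= 6; r++ {
    		// restart 1: 10s, restart 2: 20s (as logged above), 3: 40s, ... capped at 5m0s
    		fmt.Printf("restart %d: back-off %v\n", r, crashLoopDelay(r))
    	}
    }

The 14-second gap between the two "RemoveContainer" entries for the same container ID (16:42:32 to 16:42:46) is consistent with the kubelet waiting out such a backoff window before retrying.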
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.983727 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gv6p9\" (UniqueName: \"kubernetes.io/projected/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-kube-api-access-gv6p9\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.983760 4995 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:58 crc kubenswrapper[4995]: I0120 16:42:58.983770 4995 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bb24643a-1c98-49d5-a82c-53b3f9fb88f6-util\") on node \"crc\" DevicePath \"\"" Jan 20 16:42:59 crc kubenswrapper[4995]: I0120 16:42:59.486884 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" event={"ID":"bb24643a-1c98-49d5-a82c-53b3f9fb88f6","Type":"ContainerDied","Data":"8261a478b8bc2471acd57c4cac150522fdbf832f388e3654d06e55ef4ac40730"} Jan 20 16:42:59 crc kubenswrapper[4995]: I0120 16:42:59.486941 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc" Jan 20 16:42:59 crc kubenswrapper[4995]: I0120 16:42:59.486957 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8261a478b8bc2471acd57c4cac150522fdbf832f388e3654d06e55ef4ac40730" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.908755 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9"] Jan 20 16:43:09 crc kubenswrapper[4995]: E0120 16:43:09.909636 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="pull" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.909652 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="pull" Jan 20 16:43:09 crc kubenswrapper[4995]: E0120 16:43:09.909669 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="extract" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.909678 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="extract" Jan 20 16:43:09 crc kubenswrapper[4995]: E0120 16:43:09.909692 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="util" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.909702 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="util" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.909822 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb24643a-1c98-49d5-a82c-53b3f9fb88f6" containerName="extract" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.910282 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.912000 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.912240 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.916562 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-6l6d5" Jan 20 16:43:09 crc kubenswrapper[4995]: I0120 16:43:09.926378 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.009021 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kt7k\" (UniqueName: \"kubernetes.io/projected/5def50d1-b2d8-447a-8f22-8632fd26d689-kube-api-access-6kt7k\") pod \"obo-prometheus-operator-68bc856cb9-q94m9\" (UID: \"5def50d1-b2d8-447a-8f22-8632fd26d689\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.038929 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.039523 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.045261 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-t2r2t" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.045640 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.050044 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.050926 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.074697 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.112702 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kt7k\" (UniqueName: \"kubernetes.io/projected/5def50d1-b2d8-447a-8f22-8632fd26d689-kube-api-access-6kt7k\") pod \"obo-prometheus-operator-68bc856cb9-q94m9\" (UID: \"5def50d1-b2d8-447a-8f22-8632fd26d689\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.145175 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.167106 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kt7k\" (UniqueName: \"kubernetes.io/projected/5def50d1-b2d8-447a-8f22-8632fd26d689-kube-api-access-6kt7k\") pod \"obo-prometheus-operator-68bc856cb9-q94m9\" (UID: \"5def50d1-b2d8-447a-8f22-8632fd26d689\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.213898 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/27395fe5-dac8-4556-8446-a478ea8f7928-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl\" (UID: \"27395fe5-dac8-4556-8446-a478ea8f7928\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.213956 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/15bbd7b9-457e-4456-ba6a-5f664a592bab-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77\" (UID: \"15bbd7b9-457e-4456-ba6a-5f664a592bab\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.214449 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/15bbd7b9-457e-4456-ba6a-5f664a592bab-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77\" (UID: \"15bbd7b9-457e-4456-ba6a-5f664a592bab\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.214593 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/27395fe5-dac8-4556-8446-a478ea8f7928-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl\" (UID: \"27395fe5-dac8-4556-8446-a478ea8f7928\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.230222 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.247745 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-h2hm4"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.248655 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.251384 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-gfjcg" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.252237 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.263541 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-h2hm4"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.315752 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/15bbd7b9-457e-4456-ba6a-5f664a592bab-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77\" (UID: \"15bbd7b9-457e-4456-ba6a-5f664a592bab\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.315801 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/27395fe5-dac8-4556-8446-a478ea8f7928-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl\" (UID: \"27395fe5-dac8-4556-8446-a478ea8f7928\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.315831 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/27395fe5-dac8-4556-8446-a478ea8f7928-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl\" (UID: \"27395fe5-dac8-4556-8446-a478ea8f7928\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.315854 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/15bbd7b9-457e-4456-ba6a-5f664a592bab-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77\" (UID: \"15bbd7b9-457e-4456-ba6a-5f664a592bab\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.321789 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/27395fe5-dac8-4556-8446-a478ea8f7928-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl\" (UID: \"27395fe5-dac8-4556-8446-a478ea8f7928\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.325811 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/15bbd7b9-457e-4456-ba6a-5f664a592bab-webhook-cert\") pod 
\"obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77\" (UID: \"15bbd7b9-457e-4456-ba6a-5f664a592bab\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.331606 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/27395fe5-dac8-4556-8446-a478ea8f7928-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl\" (UID: \"27395fe5-dac8-4556-8446-a478ea8f7928\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.335846 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/15bbd7b9-457e-4456-ba6a-5f664a592bab-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77\" (UID: \"15bbd7b9-457e-4456-ba6a-5f664a592bab\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.345787 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-thhmm"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.346714 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-thhmm" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.350588 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-ff2sx" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.358781 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.365034 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-thhmm"] Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.385050 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.417546 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/decae503-5765-4258-9081-981c2215ebcf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-h2hm4\" (UID: \"decae503-5765-4258-9081-981c2215ebcf\") " pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.417666 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmsls\" (UniqueName: \"kubernetes.io/projected/502777ef-bdd5-4d42-b695-a7259cd811c9-kube-api-access-zmsls\") pod \"perses-operator-5bf474d74f-thhmm\" (UID: \"502777ef-bdd5-4d42-b695-a7259cd811c9\") " pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.417709 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng2q4\" (UniqueName: \"kubernetes.io/projected/decae503-5765-4258-9081-981c2215ebcf-kube-api-access-ng2q4\") pod \"observability-operator-59bdc8b94-h2hm4\" (UID: \"decae503-5765-4258-9081-981c2215ebcf\") " pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.417729 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/502777ef-bdd5-4d42-b695-a7259cd811c9-openshift-service-ca\") pod \"perses-operator-5bf474d74f-thhmm\" (UID: \"502777ef-bdd5-4d42-b695-a7259cd811c9\") " pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.524272 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmsls\" (UniqueName: \"kubernetes.io/projected/502777ef-bdd5-4d42-b695-a7259cd811c9-kube-api-access-zmsls\") pod \"perses-operator-5bf474d74f-thhmm\" (UID: \"502777ef-bdd5-4d42-b695-a7259cd811c9\") " pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.524354 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng2q4\" (UniqueName: \"kubernetes.io/projected/decae503-5765-4258-9081-981c2215ebcf-kube-api-access-ng2q4\") pod \"observability-operator-59bdc8b94-h2hm4\" (UID: \"decae503-5765-4258-9081-981c2215ebcf\") " pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.524385 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/502777ef-bdd5-4d42-b695-a7259cd811c9-openshift-service-ca\") pod \"perses-operator-5bf474d74f-thhmm\" (UID: \"502777ef-bdd5-4d42-b695-a7259cd811c9\") " pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.524433 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/decae503-5765-4258-9081-981c2215ebcf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-h2hm4\" (UID: \"decae503-5765-4258-9081-981c2215ebcf\") " pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.525869 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/502777ef-bdd5-4d42-b695-a7259cd811c9-openshift-service-ca\") pod \"perses-operator-5bf474d74f-thhmm\" (UID: \"502777ef-bdd5-4d42-b695-a7259cd811c9\") " pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.528646 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/decae503-5765-4258-9081-981c2215ebcf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-h2hm4\" (UID: \"decae503-5765-4258-9081-981c2215ebcf\") " pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.549696 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmsls\" (UniqueName: \"kubernetes.io/projected/502777ef-bdd5-4d42-b695-a7259cd811c9-kube-api-access-zmsls\") pod \"perses-operator-5bf474d74f-thhmm\" (UID: \"502777ef-bdd5-4d42-b695-a7259cd811c9\") " pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.549819 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng2q4\" (UniqueName: \"kubernetes.io/projected/decae503-5765-4258-9081-981c2215ebcf-kube-api-access-ng2q4\") pod \"observability-operator-59bdc8b94-h2hm4\" (UID: \"decae503-5765-4258-9081-981c2215ebcf\") " pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.590945 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.594812 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77"]
Jan 20 16:43:10 crc kubenswrapper[4995]: W0120 16:43:10.624191 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15bbd7b9_457e_4456_ba6a_5f664a592bab.slice/crio-e108fff21d371d67d29518cbc1fb5fa63478fc1031192b15881756dc935daf11 WatchSource:0}: Error finding container e108fff21d371d67d29518cbc1fb5fa63478fc1031192b15881756dc935daf11: Status 404 returned error can't find the container with id e108fff21d371d67d29518cbc1fb5fa63478fc1031192b15881756dc935daf11
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.667450 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl"]
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.677227 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.700235 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9"]
Jan 20 16:43:10 crc kubenswrapper[4995]: W0120 16:43:10.735061 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5def50d1_b2d8_447a_8f22_8632fd26d689.slice/crio-04c028d78380d407b9bddd02d3cd5cf69423cd816cfae9c903bbb3e63518e0bf WatchSource:0}: Error finding container 04c028d78380d407b9bddd02d3cd5cf69423cd816cfae9c903bbb3e63518e0bf: Status 404 returned error can't find the container with id 04c028d78380d407b9bddd02d3cd5cf69423cd816cfae9c903bbb3e63518e0bf
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.865594 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-h2hm4"]
Jan 20 16:43:10 crc kubenswrapper[4995]: W0120 16:43:10.876521 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddecae503_5765_4258_9081_981c2215ebcf.slice/crio-46738a605641e88f52bbe7d9aaf02cc367d3738c7a1677a283e155b8da52147a WatchSource:0}: Error finding container 46738a605641e88f52bbe7d9aaf02cc367d3738c7a1677a283e155b8da52147a: Status 404 returned error can't find the container with id 46738a605641e88f52bbe7d9aaf02cc367d3738c7a1677a283e155b8da52147a
Jan 20 16:43:10 crc kubenswrapper[4995]: I0120 16:43:10.916789 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-thhmm"]
Jan 20 16:43:10 crc kubenswrapper[4995]: W0120 16:43:10.923134 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod502777ef_bdd5_4d42_b695_a7259cd811c9.slice/crio-7ea771eb87f35d227cfd6fd3d476ab0b93556c79d952e1d2d0d70e41a9c5d06e WatchSource:0}: Error finding container 7ea771eb87f35d227cfd6fd3d476ab0b93556c79d952e1d2d0d70e41a9c5d06e: Status 404 returned error can't find the container with id 7ea771eb87f35d227cfd6fd3d476ab0b93556c79d952e1d2d0d70e41a9c5d06e
Jan 20 16:43:11 crc kubenswrapper[4995]: I0120 16:43:11.586170 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-thhmm" event={"ID":"502777ef-bdd5-4d42-b695-a7259cd811c9","Type":"ContainerStarted","Data":"7ea771eb87f35d227cfd6fd3d476ab0b93556c79d952e1d2d0d70e41a9c5d06e"}
Jan 20 16:43:11 crc kubenswrapper[4995]: I0120 16:43:11.587118 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" event={"ID":"15bbd7b9-457e-4456-ba6a-5f664a592bab","Type":"ContainerStarted","Data":"e108fff21d371d67d29518cbc1fb5fa63478fc1031192b15881756dc935daf11"}
Jan 20 16:43:11 crc kubenswrapper[4995]: I0120 16:43:11.587955 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4" event={"ID":"decae503-5765-4258-9081-981c2215ebcf","Type":"ContainerStarted","Data":"46738a605641e88f52bbe7d9aaf02cc367d3738c7a1677a283e155b8da52147a"}
Jan 20 16:43:11 crc kubenswrapper[4995]: I0120 16:43:11.588799 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" event={"ID":"5def50d1-b2d8-447a-8f22-8632fd26d689","Type":"ContainerStarted","Data":"04c028d78380d407b9bddd02d3cd5cf69423cd816cfae9c903bbb3e63518e0bf"}
Jan 20 16:43:11 crc kubenswrapper[4995]: I0120 16:43:11.592947 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" event={"ID":"27395fe5-dac8-4556-8446-a478ea8f7928","Type":"ContainerStarted","Data":"372b8d865f5a0c958c2fde55b2427cadd4d1a9fe748381215c7dc2e1af758638"}
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.681666 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" event={"ID":"27395fe5-dac8-4556-8446-a478ea8f7928","Type":"ContainerStarted","Data":"09243ca2b7655b560a54ac7dfb78ff7e4536a0bbee894b14dff6dada1a2b3b1b"}
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.692309 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-thhmm" event={"ID":"502777ef-bdd5-4d42-b695-a7259cd811c9","Type":"ContainerStarted","Data":"71348b850b76bb38a60588d9e77ab6777c9558b2b80279e0866a1d45c530a1f5"}
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.692545 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.694123 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" event={"ID":"15bbd7b9-457e-4456-ba6a-5f664a592bab","Type":"ContainerStarted","Data":"a60851a6ae1b9b3d5a1606e77296657fa1c709911948f4d68736bba39798ba8c"}
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.695841 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4" event={"ID":"decae503-5765-4258-9081-981c2215ebcf","Type":"ContainerStarted","Data":"830a7bfa5883095207a12fc0cd0c762cadd85f008f21ad3c32ca1280c0ebcde8"}
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.696148 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.702986 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" event={"ID":"5def50d1-b2d8-447a-8f22-8632fd26d689","Type":"ContainerStarted","Data":"82265bc3b34982f05ca33d0f5457f5bd5144f46c344b1a0e58c59585975c12e9"}
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.706322 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl" podStartSLOduration=1.6947008110000001 podStartE2EDuration="14.706297669s" podCreationTimestamp="2026-01-20 16:43:10 +0000 UTC" firstStartedPulling="2026-01-20 16:43:10.685415145 +0000 UTC m=+708.930019951" lastFinishedPulling="2026-01-20 16:43:23.697011993 +0000 UTC m=+721.941616809" observedRunningTime="2026-01-20 16:43:24.699407669 +0000 UTC m=+722.944012475" watchObservedRunningTime="2026-01-20 16:43:24.706297669 +0000 UTC m=+722.950902515"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.709640 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.758525 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-thhmm" podStartSLOduration=1.961454554 podStartE2EDuration="14.758509012s" podCreationTimestamp="2026-01-20 16:43:10 +0000 UTC" firstStartedPulling="2026-01-20 16:43:10.925421525 +0000 UTC m=+709.170026321" lastFinishedPulling="2026-01-20 16:43:23.722475973 +0000 UTC m=+721.967080779" observedRunningTime="2026-01-20 16:43:24.733188617 +0000 UTC m=+722.977793413" watchObservedRunningTime="2026-01-20 16:43:24.758509012 +0000 UTC m=+723.003113828"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.783845 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-h2hm4" podStartSLOduration=1.9648478169999999 podStartE2EDuration="14.783830307s" podCreationTimestamp="2026-01-20 16:43:10 +0000 UTC" firstStartedPulling="2026-01-20 16:43:10.878953459 +0000 UTC m=+709.123558265" lastFinishedPulling="2026-01-20 16:43:23.697935949 +0000 UTC m=+721.942540755" observedRunningTime="2026-01-20 16:43:24.761148435 +0000 UTC m=+723.005753241" watchObservedRunningTime="2026-01-20 16:43:24.783830307 +0000 UTC m=+723.028435113"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.786437 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77" podStartSLOduration=1.689413505 podStartE2EDuration="14.786432749s" podCreationTimestamp="2026-01-20 16:43:10 +0000 UTC" firstStartedPulling="2026-01-20 16:43:10.627112174 +0000 UTC m=+708.871716980" lastFinishedPulling="2026-01-20 16:43:23.724131418 +0000 UTC m=+721.968736224" observedRunningTime="2026-01-20 16:43:24.783995423 +0000 UTC m=+723.028600229" watchObservedRunningTime="2026-01-20 16:43:24.786432749 +0000 UTC m=+723.031037545"
Jan 20 16:43:24 crc kubenswrapper[4995]: I0120 16:43:24.823644 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-q94m9" podStartSLOduration=2.863449375 podStartE2EDuration="15.823623291s" podCreationTimestamp="2026-01-20 16:43:09 +0000 UTC" firstStartedPulling="2026-01-20 16:43:10.736839127 +0000 UTC m=+708.981443933" lastFinishedPulling="2026-01-20 16:43:23.697013043 +0000 UTC m=+721.941617849" observedRunningTime="2026-01-20 16:43:24.822104139 +0000 UTC m=+723.066708945" watchObservedRunningTime="2026-01-20 16:43:24.823623291 +0000 UTC m=+723.068228097"
Jan 20 16:43:30 crc kubenswrapper[4995]: I0120 16:43:30.572136 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 16:43:30 crc kubenswrapper[4995]: I0120 16:43:30.572644 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 16:43:30 crc kubenswrapper[4995]: I0120 16:43:30.679867 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-thhmm"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.689878 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"]
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.693095 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.695162 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.701707 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"]
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.741661 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9gdl\" (UniqueName: \"kubernetes.io/projected/66d983b8-16a0-44ba-8e76-c1a6645c2001-kube-api-access-w9gdl\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.741732 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.741753 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.842889 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9gdl\" (UniqueName: \"kubernetes.io/projected/66d983b8-16a0-44ba-8e76-c1a6645c2001-kube-api-access-w9gdl\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.842959 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.842988 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.843447 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.843565 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:49 crc kubenswrapper[4995]: I0120 16:43:49.862593 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9gdl\" (UniqueName: \"kubernetes.io/projected/66d983b8-16a0-44ba-8e76-c1a6645c2001-kube-api-access-w9gdl\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:50 crc kubenswrapper[4995]: I0120 16:43:50.008544 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
Jan 20 16:43:50 crc kubenswrapper[4995]: I0120 16:43:50.302733 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"]
Jan 20 16:43:50 crc kubenswrapper[4995]: I0120 16:43:50.866404 4995 generic.go:334] "Generic (PLEG): container finished" podID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerID="ef0c3f11909015d09b9a1e27311cb5fcaff1f35585a3eec06ec1e30668a4c96e" exitCode=0
Jan 20 16:43:50 crc kubenswrapper[4995]: I0120 16:43:50.866449 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" event={"ID":"66d983b8-16a0-44ba-8e76-c1a6645c2001","Type":"ContainerDied","Data":"ef0c3f11909015d09b9a1e27311cb5fcaff1f35585a3eec06ec1e30668a4c96e"}
Jan 20 16:43:50 crc kubenswrapper[4995]: I0120 16:43:50.866477 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" event={"ID":"66d983b8-16a0-44ba-8e76-c1a6645c2001","Type":"ContainerStarted","Data":"6b75c32b68428a578d8375615e503048ae9a332a38f81f004f5370e0fbf7891e"}
Jan 20 16:43:56 crc kubenswrapper[4995]: I0120 16:43:56.625166 4995 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 20 16:43:56 crc kubenswrapper[4995]: I0120 16:43:56.910017 4995 generic.go:334] "Generic (PLEG): container finished" podID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerID="e27f1f200055d377cea3ec64a7926979307935f0b122037e981ab425acacd202" exitCode=0
Jan 20 16:43:56 crc kubenswrapper[4995]: I0120 16:43:56.910144 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" event={"ID":"66d983b8-16a0-44ba-8e76-c1a6645c2001","Type":"ContainerDied","Data":"e27f1f200055d377cea3ec64a7926979307935f0b122037e981ab425acacd202"}
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.644443 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dh6f7"]
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.646567 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.657460 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dh6f7"]
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.660488 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqj8d\" (UniqueName: \"kubernetes.io/projected/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-kube-api-access-bqj8d\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.660938 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-catalog-content\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.661218 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-utilities\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.763410 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-catalog-content\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.763498 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-utilities\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.763594 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqj8d\" (UniqueName: \"kubernetes.io/projected/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-kube-api-access-bqj8d\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.764370 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-catalog-content\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.764369 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-utilities\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.788265 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqj8d\" (UniqueName: \"kubernetes.io/projected/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-kube-api-access-bqj8d\") pod \"redhat-operators-dh6f7\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.918054 4995 generic.go:334] "Generic (PLEG): container finished" podID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerID="d7d2e37d251d2f585181891707d286b1d4e9ad7750e10491d7ec47e92710eb31" exitCode=0
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.918149 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" event={"ID":"66d983b8-16a0-44ba-8e76-c1a6645c2001","Type":"ContainerDied","Data":"d7d2e37d251d2f585181891707d286b1d4e9ad7750e10491d7ec47e92710eb31"}
Jan 20 16:43:57 crc kubenswrapper[4995]: I0120 16:43:57.976470 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:43:58 crc kubenswrapper[4995]: I0120 16:43:58.404518 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dh6f7"]
Jan 20 16:43:58 crc kubenswrapper[4995]: I0120 16:43:58.926500 4995 generic.go:334] "Generic (PLEG): container finished" podID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerID="452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487" exitCode=0
Jan 20 16:43:58 crc kubenswrapper[4995]: I0120 16:43:58.926815 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dh6f7" event={"ID":"4ac2c3fa-f095-43ea-95ab-cf9bee60a996","Type":"ContainerDied","Data":"452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487"}
Jan 20 16:43:58 crc kubenswrapper[4995]: I0120 16:43:58.926951 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dh6f7" event={"ID":"4ac2c3fa-f095-43ea-95ab-cf9bee60a996","Type":"ContainerStarted","Data":"16512ed0781e0ad4ae335a406a3408356b1c6b46b9139306900745be2c6cd464"}
Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.186539 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp"
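The machine-config-daemon liveness failures above ("Get http://127.0.0.1:8798/health ... connection refused") are ordinary kubelet HTTP probes hitting a port with no listener. The shape of such a check is just a GET with a short timeout, where a dial error or a status outside 200-399 counts as failure; a sketch only, the kubelet's prober adds headers, thresholds, and result bookkeeping on top:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // probeOnce mimics the shape of a kubelet HTTP liveness check: GET with a
    // short timeout; a dial error or a status outside 200-399 is a failure.
    func probeOnce(url string) error {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get(url)
    	if err != nil {
    		return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		return fmt.Errorf("unexpected status %d", resp.StatusCode)
    	}
    	return nil
    }

    func main() {
    	if err := probeOnce("http://127.0.0.1:8798/health"); err != nil {
    		fmt.Println("Probe failed:", err)
    	}
    }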
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.281892 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-bundle\") pod \"66d983b8-16a0-44ba-8e76-c1a6645c2001\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.282034 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-util\") pod \"66d983b8-16a0-44ba-8e76-c1a6645c2001\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.282099 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9gdl\" (UniqueName: \"kubernetes.io/projected/66d983b8-16a0-44ba-8e76-c1a6645c2001-kube-api-access-w9gdl\") pod \"66d983b8-16a0-44ba-8e76-c1a6645c2001\" (UID: \"66d983b8-16a0-44ba-8e76-c1a6645c2001\") " Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.282747 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-bundle" (OuterVolumeSpecName: "bundle") pod "66d983b8-16a0-44ba-8e76-c1a6645c2001" (UID: "66d983b8-16a0-44ba-8e76-c1a6645c2001"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.287161 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66d983b8-16a0-44ba-8e76-c1a6645c2001-kube-api-access-w9gdl" (OuterVolumeSpecName: "kube-api-access-w9gdl") pod "66d983b8-16a0-44ba-8e76-c1a6645c2001" (UID: "66d983b8-16a0-44ba-8e76-c1a6645c2001"). InnerVolumeSpecName "kube-api-access-w9gdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.293501 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-util" (OuterVolumeSpecName: "util") pod "66d983b8-16a0-44ba-8e76-c1a6645c2001" (UID: "66d983b8-16a0-44ba-8e76-c1a6645c2001"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.384495 4995 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.384546 4995 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/66d983b8-16a0-44ba-8e76-c1a6645c2001-util\") on node \"crc\" DevicePath \"\"" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.384557 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9gdl\" (UniqueName: \"kubernetes.io/projected/66d983b8-16a0-44ba-8e76-c1a6645c2001-kube-api-access-w9gdl\") on node \"crc\" DevicePath \"\"" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.939228 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" event={"ID":"66d983b8-16a0-44ba-8e76-c1a6645c2001","Type":"ContainerDied","Data":"6b75c32b68428a578d8375615e503048ae9a332a38f81f004f5370e0fbf7891e"} Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.939305 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b75c32b68428a578d8375615e503048ae9a332a38f81f004f5370e0fbf7891e" Jan 20 16:43:59 crc kubenswrapper[4995]: I0120 16:43:59.939305 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp" Jan 20 16:44:00 crc kubenswrapper[4995]: I0120 16:44:00.571774 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:44:00 crc kubenswrapper[4995]: I0120 16:44:00.571842 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:44:00 crc kubenswrapper[4995]: I0120 16:44:00.948534 4995 generic.go:334] "Generic (PLEG): container finished" podID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerID="88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6" exitCode=0 Jan 20 16:44:00 crc kubenswrapper[4995]: I0120 16:44:00.948634 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dh6f7" event={"ID":"4ac2c3fa-f095-43ea-95ab-cf9bee60a996","Type":"ContainerDied","Data":"88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6"} Jan 20 16:44:02 crc kubenswrapper[4995]: I0120 16:44:02.968539 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dh6f7" event={"ID":"4ac2c3fa-f095-43ea-95ab-cf9bee60a996","Type":"ContainerStarted","Data":"824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692"} Jan 20 16:44:02 crc kubenswrapper[4995]: I0120 16:44:02.995950 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dh6f7" podStartSLOduration=3.123279909 podStartE2EDuration="5.995920743s" 
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.362349 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-l5g5b"]
Jan 20 16:44:06 crc kubenswrapper[4995]: E0120 16:44:06.362903 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="extract"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.362918 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="extract"
Jan 20 16:44:06 crc kubenswrapper[4995]: E0120 16:44:06.362927 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="pull"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.362934 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="pull"
Jan 20 16:44:06 crc kubenswrapper[4995]: E0120 16:44:06.362946 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="util"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.362954 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="util"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.363053 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="66d983b8-16a0-44ba-8e76-c1a6645c2001" containerName="extract"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.363468 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.366467 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.366533 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.366567 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-ws9lq"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.382484 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-l5g5b"]
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.539898 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqfpq\" (UniqueName: \"kubernetes.io/projected/eb9959cc-1ba1-48c5-9a2b-846fb2ae6590-kube-api-access-jqfpq\") pod \"nmstate-operator-646758c888-l5g5b\" (UID: \"eb9959cc-1ba1-48c5-9a2b-846fb2ae6590\") " pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.641372 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqfpq\" (UniqueName: \"kubernetes.io/projected/eb9959cc-1ba1-48c5-9a2b-846fb2ae6590-kube-api-access-jqfpq\") pod \"nmstate-operator-646758c888-l5g5b\" (UID: \"eb9959cc-1ba1-48c5-9a2b-846fb2ae6590\") " pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.658850 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqfpq\" (UniqueName: \"kubernetes.io/projected/eb9959cc-1ba1-48c5-9a2b-846fb2ae6590-kube-api-access-jqfpq\") pod \"nmstate-operator-646758c888-l5g5b\" (UID: \"eb9959cc-1ba1-48c5-9a2b-846fb2ae6590\") " pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.679779 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b"
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.905778 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-l5g5b"]
Jan 20 16:44:06 crc kubenswrapper[4995]: I0120 16:44:06.995574 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b" event={"ID":"eb9959cc-1ba1-48c5-9a2b-846fb2ae6590","Type":"ContainerStarted","Data":"e4a9986248d24e080e67fae6b4d464d812aa4f923e6e3be1129ea36baff36b31"}
Jan 20 16:44:07 crc kubenswrapper[4995]: I0120 16:44:07.977183 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:44:07 crc kubenswrapper[4995]: I0120 16:44:07.977501 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dh6f7"
Jan 20 16:44:09 crc kubenswrapper[4995]: I0120 16:44:09.024942 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dh6f7" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="registry-server" probeResult="failure" output=<
Jan 20 16:44:09 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s
Jan 20 16:44:09 crc kubenswrapper[4995]: >
Jan 20 16:44:10 crc kubenswrapper[4995]: I0120 16:44:10.016117 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b" event={"ID":"eb9959cc-1ba1-48c5-9a2b-846fb2ae6590","Type":"ContainerStarted","Data":"13dc2e1de1c92f4a208bbbd08b8d6fe722d9a7b8cbaa8dacd77dd62440998f9c"}
Jan 20 16:44:10 crc kubenswrapper[4995]: I0120 16:44:10.996608 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-l5g5b" podStartSLOduration=2.205645642 podStartE2EDuration="4.996586982s" podCreationTimestamp="2026-01-20 16:44:06 +0000 UTC" firstStartedPulling="2026-01-20 16:44:06.922360643 +0000 UTC m=+765.166965449" lastFinishedPulling="2026-01-20 16:44:09.713301963 +0000 UTC m=+767.957906789" observedRunningTime="2026-01-20 16:44:10.033454784 +0000 UTC m=+768.278059590" watchObservedRunningTime="2026-01-20 16:44:10.996586982 +0000 UTC m=+769.241191788"
Jan 20 16:44:10 crc kubenswrapper[4995]: I0120 16:44:10.997145 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-wb9tm"]
Jan 20 16:44:10 crc kubenswrapper[4995]: I0120 16:44:10.999577 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm"
Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.002916 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-5v4wd"
Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.011068 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4"]
Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.012680 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4"
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.015586 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-wb9tm"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.019783 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.031316 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.052873 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-lzkmg"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.053826 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.100235 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2r85\" (UniqueName: \"kubernetes.io/projected/95224d03-a236-4419-9ea7-35b72ad16367-kube-api-access-v2r85\") pod \"nmstate-metrics-54757c584b-wb9tm\" (UID: \"95224d03-a236-4419-9ea7-35b72ad16367\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.100295 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-ovs-socket\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.100340 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcstg\" (UniqueName: \"kubernetes.io/projected/04b8b048-2dd6-4899-8012-e20e4783fe36-kube-api-access-xcstg\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.100360 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-nmstate-lock\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.100377 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-dbus-socket\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.142829 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.144642 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.150048 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.150219 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.150273 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-jjdpb" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.150440 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202115 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlqkl\" (UniqueName: \"kubernetes.io/projected/5afe74bc-6c6a-4c69-8991-aea61b381a53-kube-api-access-hlqkl\") pod \"nmstate-webhook-8474b5b9d8-95rj4\" (UID: \"5afe74bc-6c6a-4c69-8991-aea61b381a53\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202160 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9e16a4a-ae36-4787-936d-78f9f621b82b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202187 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2r85\" (UniqueName: \"kubernetes.io/projected/95224d03-a236-4419-9ea7-35b72ad16367-kube-api-access-v2r85\") pod \"nmstate-metrics-54757c584b-wb9tm\" (UID: \"95224d03-a236-4419-9ea7-35b72ad16367\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/e9e16a4a-ae36-4787-936d-78f9f621b82b-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202232 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-ovs-socket\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202264 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wkzk\" (UniqueName: \"kubernetes.io/projected/e9e16a4a-ae36-4787-936d-78f9f621b82b-kube-api-access-9wkzk\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202285 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcstg\" (UniqueName: 
\"kubernetes.io/projected/04b8b048-2dd6-4899-8012-e20e4783fe36-kube-api-access-xcstg\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202305 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/5afe74bc-6c6a-4c69-8991-aea61b381a53-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-95rj4\" (UID: \"5afe74bc-6c6a-4c69-8991-aea61b381a53\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202338 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-nmstate-lock\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202363 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-dbus-socket\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202700 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-dbus-socket\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202772 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-ovs-socket\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.202870 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/04b8b048-2dd6-4899-8012-e20e4783fe36-nmstate-lock\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.229784 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcstg\" (UniqueName: \"kubernetes.io/projected/04b8b048-2dd6-4899-8012-e20e4783fe36-kube-api-access-xcstg\") pod \"nmstate-handler-lzkmg\" (UID: \"04b8b048-2dd6-4899-8012-e20e4783fe36\") " pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.230741 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2r85\" (UniqueName: \"kubernetes.io/projected/95224d03-a236-4419-9ea7-35b72ad16367-kube-api-access-v2r85\") pod \"nmstate-metrics-54757c584b-wb9tm\" (UID: \"95224d03-a236-4419-9ea7-35b72ad16367\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.303236 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlqkl\" (UniqueName: 
\"kubernetes.io/projected/5afe74bc-6c6a-4c69-8991-aea61b381a53-kube-api-access-hlqkl\") pod \"nmstate-webhook-8474b5b9d8-95rj4\" (UID: \"5afe74bc-6c6a-4c69-8991-aea61b381a53\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.303297 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9e16a4a-ae36-4787-936d-78f9f621b82b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.303323 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/e9e16a4a-ae36-4787-936d-78f9f621b82b-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.303360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wkzk\" (UniqueName: \"kubernetes.io/projected/e9e16a4a-ae36-4787-936d-78f9f621b82b-kube-api-access-9wkzk\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.303381 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/5afe74bc-6c6a-4c69-8991-aea61b381a53-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-95rj4\" (UID: \"5afe74bc-6c6a-4c69-8991-aea61b381a53\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: E0120 16:44:11.303784 4995 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 20 16:44:11 crc kubenswrapper[4995]: E0120 16:44:11.303852 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e9e16a4a-ae36-4787-936d-78f9f621b82b-plugin-serving-cert podName:e9e16a4a-ae36-4787-936d-78f9f621b82b nodeName:}" failed. No retries permitted until 2026-01-20 16:44:11.803832659 +0000 UTC m=+770.048437475 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/e9e16a4a-ae36-4787-936d-78f9f621b82b-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-wlqrf" (UID: "e9e16a4a-ae36-4787-936d-78f9f621b82b") : secret "plugin-serving-cert" not found Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.304472 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/e9e16a4a-ae36-4787-936d-78f9f621b82b-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.306767 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/5afe74bc-6c6a-4c69-8991-aea61b381a53-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-95rj4\" (UID: \"5afe74bc-6c6a-4c69-8991-aea61b381a53\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.321204 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-78895ffd7d-z7lb4"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.321436 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.321867 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.332947 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wkzk\" (UniqueName: \"kubernetes.io/projected/e9e16a4a-ae36-4787-936d-78f9f621b82b-kube-api-access-9wkzk\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.336033 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlqkl\" (UniqueName: \"kubernetes.io/projected/5afe74bc-6c6a-4c69-8991-aea61b381a53-kube-api-access-hlqkl\") pod \"nmstate-webhook-8474b5b9d8-95rj4\" (UID: \"5afe74bc-6c6a-4c69-8991-aea61b381a53\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.341377 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-78895ffd7d-z7lb4"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.341446 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.382895 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507408 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrmbj\" (UniqueName: \"kubernetes.io/projected/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-kube-api-access-lrmbj\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507465 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-oauth-config\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507483 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-trusted-ca-bundle\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507512 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-service-ca\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507535 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-oauth-serving-cert\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507561 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-config\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.507591 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-serving-cert\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.564168 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-wb9tm"] Jan 20 16:44:11 crc kubenswrapper[4995]: W0120 16:44:11.566952 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95224d03_a236_4419_9ea7_35b72ad16367.slice/crio-5df57e890acb2a56650ff3d0393f87f62d125737d8f17682830b0cd700e7eee2 WatchSource:0}: Error finding container 
5df57e890acb2a56650ff3d0393f87f62d125737d8f17682830b0cd700e7eee2: Status 404 returned error can't find the container with id 5df57e890acb2a56650ff3d0393f87f62d125737d8f17682830b0cd700e7eee2 Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608474 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-oauth-serving-cert\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608520 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-config\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608552 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-serving-cert\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608581 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrmbj\" (UniqueName: \"kubernetes.io/projected/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-kube-api-access-lrmbj\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-oauth-config\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608631 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-trusted-ca-bundle\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.608660 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-service-ca\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.609501 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-service-ca\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.610337 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-trusted-ca-bundle\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.610727 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-oauth-serving-cert\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.610985 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-config\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.614745 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-oauth-config\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.618021 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-console-serving-cert\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.632469 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrmbj\" (UniqueName: \"kubernetes.io/projected/e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66-kube-api-access-lrmbj\") pod \"console-78895ffd7d-z7lb4\" (UID: \"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66\") " pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.698057 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.805575 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4"] Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.814058 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9e16a4a-ae36-4787-936d-78f9f621b82b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:11 crc kubenswrapper[4995]: W0120 16:44:11.818599 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5afe74bc_6c6a_4c69_8991_aea61b381a53.slice/crio-9bf1522c6564dae0712b66fdfd0fb6ab46fdd38a87b0d39d3aebc93c5550eaff WatchSource:0}: Error finding container 9bf1522c6564dae0712b66fdfd0fb6ab46fdd38a87b0d39d3aebc93c5550eaff: Status 404 returned error can't find the container with id 9bf1522c6564dae0712b66fdfd0fb6ab46fdd38a87b0d39d3aebc93c5550eaff Jan 20 16:44:11 crc kubenswrapper[4995]: I0120 16:44:11.820163 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/e9e16a4a-ae36-4787-936d-78f9f621b82b-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-wlqrf\" (UID: \"e9e16a4a-ae36-4787-936d-78f9f621b82b\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:12 crc kubenswrapper[4995]: I0120 16:44:12.039004 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" event={"ID":"95224d03-a236-4419-9ea7-35b72ad16367","Type":"ContainerStarted","Data":"5df57e890acb2a56650ff3d0393f87f62d125737d8f17682830b0cd700e7eee2"} Jan 20 16:44:12 crc kubenswrapper[4995]: I0120 16:44:12.040512 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" event={"ID":"5afe74bc-6c6a-4c69-8991-aea61b381a53","Type":"ContainerStarted","Data":"9bf1522c6564dae0712b66fdfd0fb6ab46fdd38a87b0d39d3aebc93c5550eaff"} Jan 20 16:44:12 crc kubenswrapper[4995]: I0120 16:44:12.042119 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-lzkmg" event={"ID":"04b8b048-2dd6-4899-8012-e20e4783fe36","Type":"ContainerStarted","Data":"b5319c3dc91897f38508bebae2291858c20b6c646f1ae1e5a95fff53bc49c4bb"} Jan 20 16:44:12 crc kubenswrapper[4995]: I0120 16:44:12.062881 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" Jan 20 16:44:12 crc kubenswrapper[4995]: I0120 16:44:12.107757 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-78895ffd7d-z7lb4"] Jan 20 16:44:12 crc kubenswrapper[4995]: I0120 16:44:12.291886 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf"] Jan 20 16:44:12 crc kubenswrapper[4995]: W0120 16:44:12.296366 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9e16a4a_ae36_4787_936d_78f9f621b82b.slice/crio-823f1c1741a41bc71c199f0fc788525877cfa5de99445126ec4744126a12b77b WatchSource:0}: Error finding container 823f1c1741a41bc71c199f0fc788525877cfa5de99445126ec4744126a12b77b: Status 404 returned error can't find the container with id 823f1c1741a41bc71c199f0fc788525877cfa5de99445126ec4744126a12b77b Jan 20 16:44:13 crc kubenswrapper[4995]: I0120 16:44:13.052248 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78895ffd7d-z7lb4" event={"ID":"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66","Type":"ContainerStarted","Data":"787061f28e8f3e28d40900059569f64ddf2e0683a8042da52f6170fa128a10ab"} Jan 20 16:44:13 crc kubenswrapper[4995]: I0120 16:44:13.053672 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" event={"ID":"e9e16a4a-ae36-4787-936d-78f9f621b82b","Type":"ContainerStarted","Data":"823f1c1741a41bc71c199f0fc788525877cfa5de99445126ec4744126a12b77b"} Jan 20 16:44:14 crc kubenswrapper[4995]: I0120 16:44:14.062350 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78895ffd7d-z7lb4" event={"ID":"e08919f8-ddfb-4ec8-8d78-7c3d9dcbeb66","Type":"ContainerStarted","Data":"88cf672ed06d5f8664e0f4421d2799a5215e86a0350411cc08a96c489cf96634"} Jan 20 16:44:14 crc kubenswrapper[4995]: I0120 16:44:14.089333 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-78895ffd7d-z7lb4" podStartSLOduration=3.089313928 podStartE2EDuration="3.089313928s" podCreationTimestamp="2026-01-20 16:44:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:44:14.088219178 +0000 UTC m=+772.332823994" watchObservedRunningTime="2026-01-20 16:44:14.089313928 +0000 UTC m=+772.333918734" Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.093199 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-lzkmg" event={"ID":"04b8b048-2dd6-4899-8012-e20e4783fe36","Type":"ContainerStarted","Data":"cbd8d7be810c6a3881fd3c17055752f36c38ba275b05f1e97512f2a4f5d0bd6d"} Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.093941 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.096520 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" event={"ID":"95224d03-a236-4419-9ea7-35b72ad16367","Type":"ContainerStarted","Data":"a60181991a8c6faeeaa314e8731548aa77f665ef93e2faf29a2fafe8b9045d80"} Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.097762 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" 
event={"ID":"5afe74bc-6c6a-4c69-8991-aea61b381a53","Type":"ContainerStarted","Data":"668ff1a5b92175b431af2555b6f7569a6b31bd081b52ad19e810e265d917da75"} Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.098171 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.099382 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" event={"ID":"e9e16a4a-ae36-4787-936d-78f9f621b82b","Type":"ContainerStarted","Data":"117860ae90ba66cf50d75439d2750000a09412888fb0c150b6d08a7d6dea6c6d"} Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.118925 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-lzkmg" podStartSLOduration=1.328534436 podStartE2EDuration="5.11890395s" podCreationTimestamp="2026-01-20 16:44:11 +0000 UTC" firstStartedPulling="2026-01-20 16:44:11.420915233 +0000 UTC m=+769.665520039" lastFinishedPulling="2026-01-20 16:44:15.211284747 +0000 UTC m=+773.455889553" observedRunningTime="2026-01-20 16:44:16.116259348 +0000 UTC m=+774.360864194" watchObservedRunningTime="2026-01-20 16:44:16.11890395 +0000 UTC m=+774.363508756" Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.140045 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-wlqrf" podStartSLOduration=2.230522807 podStartE2EDuration="5.140026191s" podCreationTimestamp="2026-01-20 16:44:11 +0000 UTC" firstStartedPulling="2026-01-20 16:44:12.299436118 +0000 UTC m=+770.544040924" lastFinishedPulling="2026-01-20 16:44:15.208939482 +0000 UTC m=+773.453544308" observedRunningTime="2026-01-20 16:44:16.136637468 +0000 UTC m=+774.381242284" watchObservedRunningTime="2026-01-20 16:44:16.140026191 +0000 UTC m=+774.384630997" Jan 20 16:44:16 crc kubenswrapper[4995]: I0120 16:44:16.166908 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" podStartSLOduration=2.7688536089999998 podStartE2EDuration="6.165933132s" podCreationTimestamp="2026-01-20 16:44:10 +0000 UTC" firstStartedPulling="2026-01-20 16:44:11.819558111 +0000 UTC m=+770.064162917" lastFinishedPulling="2026-01-20 16:44:15.216637634 +0000 UTC m=+773.461242440" observedRunningTime="2026-01-20 16:44:16.165922042 +0000 UTC m=+774.410526878" watchObservedRunningTime="2026-01-20 16:44:16.165933132 +0000 UTC m=+774.410537978" Jan 20 16:44:18 crc kubenswrapper[4995]: I0120 16:44:18.077888 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dh6f7" Jan 20 16:44:18 crc kubenswrapper[4995]: I0120 16:44:18.119646 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" event={"ID":"95224d03-a236-4419-9ea7-35b72ad16367","Type":"ContainerStarted","Data":"c6ec8c22079a3ee3d8f82c0f879fdb961d5a888ce221566755b77bf30a1d9726"} Jan 20 16:44:18 crc kubenswrapper[4995]: I0120 16:44:18.147640 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-wb9tm" podStartSLOduration=2.243651206 podStartE2EDuration="8.147611819s" podCreationTimestamp="2026-01-20 16:44:10 +0000 UTC" firstStartedPulling="2026-01-20 16:44:11.570274815 +0000 UTC m=+769.814879611" lastFinishedPulling="2026-01-20 16:44:17.474235378 +0000 UTC 
m=+775.718840224" observedRunningTime="2026-01-20 16:44:18.13672084 +0000 UTC m=+776.381325696" watchObservedRunningTime="2026-01-20 16:44:18.147611819 +0000 UTC m=+776.392216655" Jan 20 16:44:18 crc kubenswrapper[4995]: I0120 16:44:18.151964 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dh6f7" Jan 20 16:44:20 crc kubenswrapper[4995]: I0120 16:44:20.481363 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dh6f7"] Jan 20 16:44:20 crc kubenswrapper[4995]: I0120 16:44:20.482120 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dh6f7" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="registry-server" containerID="cri-o://824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692" gracePeriod=2 Jan 20 16:44:20 crc kubenswrapper[4995]: I0120 16:44:20.922864 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dh6f7" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.048788 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-utilities\") pod \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.049293 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-catalog-content\") pod \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.049656 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqj8d\" (UniqueName: \"kubernetes.io/projected/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-kube-api-access-bqj8d\") pod \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\" (UID: \"4ac2c3fa-f095-43ea-95ab-cf9bee60a996\") " Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.050156 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-utilities" (OuterVolumeSpecName: "utilities") pod "4ac2c3fa-f095-43ea-95ab-cf9bee60a996" (UID: "4ac2c3fa-f095-43ea-95ab-cf9bee60a996"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.050530 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.057679 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-kube-api-access-bqj8d" (OuterVolumeSpecName: "kube-api-access-bqj8d") pod "4ac2c3fa-f095-43ea-95ab-cf9bee60a996" (UID: "4ac2c3fa-f095-43ea-95ab-cf9bee60a996"). InnerVolumeSpecName "kube-api-access-bqj8d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.148257 4995 generic.go:334] "Generic (PLEG): container finished" podID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerID="824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692" exitCode=0 Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.148338 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dh6f7" event={"ID":"4ac2c3fa-f095-43ea-95ab-cf9bee60a996","Type":"ContainerDied","Data":"824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692"} Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.148387 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dh6f7" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.148419 4995 scope.go:117] "RemoveContainer" containerID="824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.148398 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dh6f7" event={"ID":"4ac2c3fa-f095-43ea-95ab-cf9bee60a996","Type":"ContainerDied","Data":"16512ed0781e0ad4ae335a406a3408356b1c6b46b9139306900745be2c6cd464"} Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.152894 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqj8d\" (UniqueName: \"kubernetes.io/projected/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-kube-api-access-bqj8d\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.174842 4995 scope.go:117] "RemoveContainer" containerID="88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.207819 4995 scope.go:117] "RemoveContainer" containerID="452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.231771 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ac2c3fa-f095-43ea-95ab-cf9bee60a996" (UID: "4ac2c3fa-f095-43ea-95ab-cf9bee60a996"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.240545 4995 scope.go:117] "RemoveContainer" containerID="824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692" Jan 20 16:44:21 crc kubenswrapper[4995]: E0120 16:44:21.241437 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692\": container with ID starting with 824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692 not found: ID does not exist" containerID="824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.241517 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692"} err="failed to get container status \"824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692\": rpc error: code = NotFound desc = could not find container \"824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692\": container with ID starting with 824ec5832fb4512e00d8c9f86406e0f875ae985868eaebde07ea53786b283692 not found: ID does not exist" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.241571 4995 scope.go:117] "RemoveContainer" containerID="88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6" Jan 20 16:44:21 crc kubenswrapper[4995]: E0120 16:44:21.242248 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6\": container with ID starting with 88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6 not found: ID does not exist" containerID="88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.242308 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6"} err="failed to get container status \"88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6\": rpc error: code = NotFound desc = could not find container \"88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6\": container with ID starting with 88b46ad3cf780b978eb8140d01bb511541cdf9bdb54de1b6e4595a281e1fc6d6 not found: ID does not exist" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.242350 4995 scope.go:117] "RemoveContainer" containerID="452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487" Jan 20 16:44:21 crc kubenswrapper[4995]: E0120 16:44:21.242850 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487\": container with ID starting with 452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487 not found: ID does not exist" containerID="452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.242898 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487"} err="failed to get container status \"452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487\": rpc error: code = NotFound desc = could not 
find container \"452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487\": container with ID starting with 452df4fa76a591b7840e2a1ffeac675097f5293d814428eb295e846e381de487 not found: ID does not exist" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.254268 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ac2c3fa-f095-43ea-95ab-cf9bee60a996-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.422480 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-lzkmg" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.492033 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dh6f7"] Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.498621 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dh6f7"] Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.698417 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.698483 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:21 crc kubenswrapper[4995]: I0120 16:44:21.706526 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:22 crc kubenswrapper[4995]: I0120 16:44:22.006580 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" path="/var/lib/kubelet/pods/4ac2c3fa-f095-43ea-95ab-cf9bee60a996/volumes" Jan 20 16:44:22 crc kubenswrapper[4995]: I0120 16:44:22.162220 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-78895ffd7d-z7lb4" Jan 20 16:44:22 crc kubenswrapper[4995]: I0120 16:44:22.221170 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jzs8c"] Jan 20 16:44:30 crc kubenswrapper[4995]: I0120 16:44:30.571694 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:44:30 crc kubenswrapper[4995]: I0120 16:44:30.572763 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:44:30 crc kubenswrapper[4995]: I0120 16:44:30.572842 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:44:30 crc kubenswrapper[4995]: I0120 16:44:30.574109 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"21a51aeb68249229f8bec50af82e0400807574c3c8c35d6878a257fbb5a8baf3"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 
20 16:44:30 crc kubenswrapper[4995]: I0120 16:44:30.574230 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://21a51aeb68249229f8bec50af82e0400807574c3c8c35d6878a257fbb5a8baf3" gracePeriod=600 Jan 20 16:44:31 crc kubenswrapper[4995]: I0120 16:44:31.231469 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="21a51aeb68249229f8bec50af82e0400807574c3c8c35d6878a257fbb5a8baf3" exitCode=0 Jan 20 16:44:31 crc kubenswrapper[4995]: I0120 16:44:31.231812 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"21a51aeb68249229f8bec50af82e0400807574c3c8c35d6878a257fbb5a8baf3"} Jan 20 16:44:31 crc kubenswrapper[4995]: I0120 16:44:31.231845 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"ef514ad170d2e1a38aa428bd4835a847c0ca2074e5ec7e7cc5427ce30e0cd1ed"} Jan 20 16:44:31 crc kubenswrapper[4995]: I0120 16:44:31.231866 4995 scope.go:117] "RemoveContainer" containerID="693a83565e92db396315c9a438801f27de4160695f8ccbcf90e6d5eab58bd11c" Jan 20 16:44:31 crc kubenswrapper[4995]: I0120 16:44:31.350354 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-95rj4" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.435211 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b"] Jan 20 16:44:45 crc kubenswrapper[4995]: E0120 16:44:45.435938 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="extract-content" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.435949 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="extract-content" Jan 20 16:44:45 crc kubenswrapper[4995]: E0120 16:44:45.435966 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="extract-utilities" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.435971 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="extract-utilities" Jan 20 16:44:45 crc kubenswrapper[4995]: E0120 16:44:45.435987 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="registry-server" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.435992 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="registry-server" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.436118 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ac2c3fa-f095-43ea-95ab-cf9bee60a996" containerName="registry-server" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.436881 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.439255 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.451185 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b"] Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.503298 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.503370 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.503450 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz2wl\" (UniqueName: \"kubernetes.io/projected/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-kube-api-access-sz2wl\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.605186 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.605251 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.605333 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz2wl\" (UniqueName: \"kubernetes.io/projected/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-kube-api-access-sz2wl\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.605810 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.606105 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.624523 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz2wl\" (UniqueName: \"kubernetes.io/projected/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-kube-api-access-sz2wl\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:45 crc kubenswrapper[4995]: I0120 16:44:45.763672 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:46 crc kubenswrapper[4995]: I0120 16:44:46.014784 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b"] Jan 20 16:44:46 crc kubenswrapper[4995]: I0120 16:44:46.327070 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" event={"ID":"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d","Type":"ContainerStarted","Data":"0ee78c35c08d054fcc9d0995228903fc873ac8a7d044d591ff86f8b64021beb6"} Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.274553 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-jzs8c" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerName="console" containerID="cri-o://99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e" gracePeriod=15 Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.335493 4995 generic.go:334] "Generic (PLEG): container finished" podID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerID="c1980a71c723bafac549abf330fc298b1cd220aa9613ced6e82ef1cba3accbb7" exitCode=0 Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.335775 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" event={"ID":"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d","Type":"ContainerDied","Data":"c1980a71c723bafac549abf330fc298b1cd220aa9613ced6e82ef1cba3accbb7"} Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.668655 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jzs8c_2cd4e02b-cb10-4bb2-b318-d24372346b1d/console/0.log" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.668721 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731624 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrnrv\" (UniqueName: \"kubernetes.io/projected/2cd4e02b-cb10-4bb2-b318-d24372346b1d-kube-api-access-xrnrv\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731740 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-config\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731804 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-trusted-ca-bundle\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731843 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-oauth-config\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731878 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-serving-cert\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731907 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-service-ca\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.731935 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-oauth-serving-cert\") pod \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\" (UID: \"2cd4e02b-cb10-4bb2-b318-d24372346b1d\") " Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.732713 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-config" (OuterVolumeSpecName: "console-config") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.732770 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.732785 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.732876 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-service-ca" (OuterVolumeSpecName: "service-ca") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.740120 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.742385 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cd4e02b-cb10-4bb2-b318-d24372346b1d-kube-api-access-xrnrv" (OuterVolumeSpecName: "kube-api-access-xrnrv") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "kube-api-access-xrnrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.742938 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2cd4e02b-cb10-4bb2-b318-d24372346b1d" (UID: "2cd4e02b-cb10-4bb2-b318-d24372346b1d"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.832971 4995 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.833000 4995 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.833010 4995 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.833028 4995 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd4e02b-cb10-4bb2-b318-d24372346b1d-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.833038 4995 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.833046 4995 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2cd4e02b-cb10-4bb2-b318-d24372346b1d-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:47 crc kubenswrapper[4995]: I0120 16:44:47.833054 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrnrv\" (UniqueName: \"kubernetes.io/projected/2cd4e02b-cb10-4bb2-b318-d24372346b1d-kube-api-access-xrnrv\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.344599 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-jzs8c_2cd4e02b-cb10-4bb2-b318-d24372346b1d/console/0.log" Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.344652 4995 generic.go:334] "Generic (PLEG): container finished" podID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerID="99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e" exitCode=2 Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.344682 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jzs8c" event={"ID":"2cd4e02b-cb10-4bb2-b318-d24372346b1d","Type":"ContainerDied","Data":"99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e"} Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.344709 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-jzs8c" event={"ID":"2cd4e02b-cb10-4bb2-b318-d24372346b1d","Type":"ContainerDied","Data":"fe66f518d0c883f269ac72f6f10b6bc376ed19a30ceb73f2fd7b63bf7986eeff"} Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.344729 4995 scope.go:117] "RemoveContainer" containerID="99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e" Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.344834 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-jzs8c" Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.375114 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-jzs8c"] Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.376504 4995 scope.go:117] "RemoveContainer" containerID="99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e" Jan 20 16:44:48 crc kubenswrapper[4995]: E0120 16:44:48.377142 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e\": container with ID starting with 99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e not found: ID does not exist" containerID="99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e" Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.377186 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e"} err="failed to get container status \"99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e\": rpc error: code = NotFound desc = could not find container \"99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e\": container with ID starting with 99544c5979dee9c8910cd0e84d538296c9006a4c1a5e909e441e3013d961088e not found: ID does not exist" Jan 20 16:44:48 crc kubenswrapper[4995]: I0120 16:44:48.383592 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-jzs8c"] Jan 20 16:44:49 crc kubenswrapper[4995]: I0120 16:44:49.357379 4995 generic.go:334] "Generic (PLEG): container finished" podID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerID="acd0de9a58376edb5453d254c45fa6e2cdd8d7372109852185b9e697db5db2fe" exitCode=0 Jan 20 16:44:49 crc kubenswrapper[4995]: I0120 16:44:49.357413 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" event={"ID":"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d","Type":"ContainerDied","Data":"acd0de9a58376edb5453d254c45fa6e2cdd8d7372109852185b9e697db5db2fe"} Jan 20 16:44:49 crc kubenswrapper[4995]: I0120 16:44:49.995529 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" path="/var/lib/kubelet/pods/2cd4e02b-cb10-4bb2-b318-d24372346b1d/volumes" Jan 20 16:44:50 crc kubenswrapper[4995]: I0120 16:44:50.367678 4995 generic.go:334] "Generic (PLEG): container finished" podID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerID="b2869250d04c11fec079e9f8db81753ac05a58fa66a4ed7faea48ad3a19fd4dd" exitCode=0 Jan 20 16:44:50 crc kubenswrapper[4995]: I0120 16:44:50.367739 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" event={"ID":"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d","Type":"ContainerDied","Data":"b2869250d04c11fec079e9f8db81753ac05a58fa66a4ed7faea48ad3a19fd4dd"} Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.648061 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.702695 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-bundle\") pod \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.702844 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-util\") pod \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.702901 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sz2wl\" (UniqueName: \"kubernetes.io/projected/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-kube-api-access-sz2wl\") pod \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\" (UID: \"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d\") " Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.703843 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-bundle" (OuterVolumeSpecName: "bundle") pod "d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" (UID: "d10c311c-330e-4ef3-bfb4-bbb14ca8d42d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.710499 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-kube-api-access-sz2wl" (OuterVolumeSpecName: "kube-api-access-sz2wl") pod "d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" (UID: "d10c311c-330e-4ef3-bfb4-bbb14ca8d42d"). InnerVolumeSpecName "kube-api-access-sz2wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.716487 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-util" (OuterVolumeSpecName: "util") pod "d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" (UID: "d10c311c-330e-4ef3-bfb4-bbb14ca8d42d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.804026 4995 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-util\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.804102 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sz2wl\" (UniqueName: \"kubernetes.io/projected/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-kube-api-access-sz2wl\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:51 crc kubenswrapper[4995]: I0120 16:44:51.804123 4995 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d10c311c-330e-4ef3-bfb4-bbb14ca8d42d-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:44:52 crc kubenswrapper[4995]: I0120 16:44:52.384056 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" event={"ID":"d10c311c-330e-4ef3-bfb4-bbb14ca8d42d","Type":"ContainerDied","Data":"0ee78c35c08d054fcc9d0995228903fc873ac8a7d044d591ff86f8b64021beb6"} Jan 20 16:44:52 crc kubenswrapper[4995]: I0120 16:44:52.384147 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ee78c35c08d054fcc9d0995228903fc873ac8a7d044d591ff86f8b64021beb6" Jan 20 16:44:52 crc kubenswrapper[4995]: I0120 16:44:52.384129 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.146156 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7"] Jan 20 16:45:00 crc kubenswrapper[4995]: E0120 16:45:00.147064 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="extract" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147109 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="extract" Jan 20 16:45:00 crc kubenswrapper[4995]: E0120 16:45:00.147124 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerName="console" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147133 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerName="console" Jan 20 16:45:00 crc kubenswrapper[4995]: E0120 16:45:00.147148 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="util" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147156 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="util" Jan 20 16:45:00 crc kubenswrapper[4995]: E0120 16:45:00.147169 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="pull" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147177 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="pull" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147310 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cd4e02b-cb10-4bb2-b318-d24372346b1d" containerName="console" Jan 
20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147333 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d10c311c-330e-4ef3-bfb4-bbb14ca8d42d" containerName="extract" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.147858 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.150584 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.150766 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.161707 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7"] Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.216447 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca405ab9-f0ea-489f-bcec-8e6e686e66af-config-volume\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.216589 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca405ab9-f0ea-489f-bcec-8e6e686e66af-secret-volume\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.216624 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxht5\" (UniqueName: \"kubernetes.io/projected/ca405ab9-f0ea-489f-bcec-8e6e686e66af-kube-api-access-rxht5\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.317961 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca405ab9-f0ea-489f-bcec-8e6e686e66af-secret-volume\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.318011 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxht5\" (UniqueName: \"kubernetes.io/projected/ca405ab9-f0ea-489f-bcec-8e6e686e66af-kube-api-access-rxht5\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.318053 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca405ab9-f0ea-489f-bcec-8e6e686e66af-config-volume\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.318914 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca405ab9-f0ea-489f-bcec-8e6e686e66af-config-volume\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.324069 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca405ab9-f0ea-489f-bcec-8e6e686e66af-secret-volume\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.333607 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxht5\" (UniqueName: \"kubernetes.io/projected/ca405ab9-f0ea-489f-bcec-8e6e686e66af-kube-api-access-rxht5\") pod \"collect-profiles-29482125-5cbs7\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.471143 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:00 crc kubenswrapper[4995]: I0120 16:45:00.716721 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7"] Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.457567 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4"] Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.460264 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.471411 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-crthk" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.471705 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.471895 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.472283 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.472522 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.480965 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4"] Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.528365 4995 generic.go:334] "Generic (PLEG): container finished" podID="ca405ab9-f0ea-489f-bcec-8e6e686e66af" containerID="ae7c442644900fbdc71848c90b976e434c4defb574ecc6013f95083d82d434c4" exitCode=0 Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.528403 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" event={"ID":"ca405ab9-f0ea-489f-bcec-8e6e686e66af","Type":"ContainerDied","Data":"ae7c442644900fbdc71848c90b976e434c4defb574ecc6013f95083d82d434c4"} Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.528436 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" event={"ID":"ca405ab9-f0ea-489f-bcec-8e6e686e66af","Type":"ContainerStarted","Data":"5bef3b3d47fd1cac6c51b9e2fe55efd2670315364d79957afd9870ee24917d1a"} Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.529238 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfhn5\" (UniqueName: \"kubernetes.io/projected/8b17b582-a06b-4ece-b513-7f826c838f6f-kube-api-access-qfhn5\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.529319 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8b17b582-a06b-4ece-b513-7f826c838f6f-apiservice-cert\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.529417 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8b17b582-a06b-4ece-b513-7f826c838f6f-webhook-cert\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc 
kubenswrapper[4995]: I0120 16:45:01.630875 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8b17b582-a06b-4ece-b513-7f826c838f6f-apiservice-cert\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.630940 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8b17b582-a06b-4ece-b513-7f826c838f6f-webhook-cert\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.631008 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfhn5\" (UniqueName: \"kubernetes.io/projected/8b17b582-a06b-4ece-b513-7f826c838f6f-kube-api-access-qfhn5\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.636880 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8b17b582-a06b-4ece-b513-7f826c838f6f-webhook-cert\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.638571 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8b17b582-a06b-4ece-b513-7f826c838f6f-apiservice-cert\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.654969 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfhn5\" (UniqueName: \"kubernetes.io/projected/8b17b582-a06b-4ece-b513-7f826c838f6f-kube-api-access-qfhn5\") pod \"metallb-operator-controller-manager-6dd7779458-w2rt4\" (UID: \"8b17b582-a06b-4ece-b513-7f826c838f6f\") " pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.786056 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv"] Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.786954 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.789241 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-9xjq9" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.789277 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.789388 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.799942 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.803365 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv"] Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.834285 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcwvd\" (UniqueName: \"kubernetes.io/projected/7fae7627-5782-4525-ba17-4507d15764cd-kube-api-access-jcwvd\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.834351 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fae7627-5782-4525-ba17-4507d15764cd-apiservice-cert\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.834513 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fae7627-5782-4525-ba17-4507d15764cd-webhook-cert\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.935965 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fae7627-5782-4525-ba17-4507d15764cd-apiservice-cert\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.936051 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fae7627-5782-4525-ba17-4507d15764cd-webhook-cert\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.936104 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcwvd\" (UniqueName: \"kubernetes.io/projected/7fae7627-5782-4525-ba17-4507d15764cd-kube-api-access-jcwvd\") pod 
\"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.940638 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7fae7627-5782-4525-ba17-4507d15764cd-webhook-cert\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.942773 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7fae7627-5782-4525-ba17-4507d15764cd-apiservice-cert\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:01 crc kubenswrapper[4995]: I0120 16:45:01.964691 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcwvd\" (UniqueName: \"kubernetes.io/projected/7fae7627-5782-4525-ba17-4507d15764cd-kube-api-access-jcwvd\") pod \"metallb-operator-webhook-server-5c6d4b5599-f8tsv\" (UID: \"7fae7627-5782-4525-ba17-4507d15764cd\") " pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.068339 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4"] Jan 20 16:45:02 crc kubenswrapper[4995]: W0120 16:45:02.086882 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b17b582_a06b_4ece_b513_7f826c838f6f.slice/crio-7e5e66371d9eedfbe3b9dcdf5cbebbc1a9cd4039d05d85f0e736e738e3142824 WatchSource:0}: Error finding container 7e5e66371d9eedfbe3b9dcdf5cbebbc1a9cd4039d05d85f0e736e738e3142824: Status 404 returned error can't find the container with id 7e5e66371d9eedfbe3b9dcdf5cbebbc1a9cd4039d05d85f0e736e738e3142824 Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.104712 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.311375 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv"] Jan 20 16:45:02 crc kubenswrapper[4995]: W0120 16:45:02.316491 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7fae7627_5782_4525_ba17_4507d15764cd.slice/crio-3013f70e406a0e7eb178695c4f01e4d6bf5aeeed1cd79966f0fbb7fd6c366e85 WatchSource:0}: Error finding container 3013f70e406a0e7eb178695c4f01e4d6bf5aeeed1cd79966f0fbb7fd6c366e85: Status 404 returned error can't find the container with id 3013f70e406a0e7eb178695c4f01e4d6bf5aeeed1cd79966f0fbb7fd6c366e85 Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.535148 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" event={"ID":"8b17b582-a06b-4ece-b513-7f826c838f6f","Type":"ContainerStarted","Data":"7e5e66371d9eedfbe3b9dcdf5cbebbc1a9cd4039d05d85f0e736e738e3142824"} Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.537561 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" event={"ID":"7fae7627-5782-4525-ba17-4507d15764cd","Type":"ContainerStarted","Data":"3013f70e406a0e7eb178695c4f01e4d6bf5aeeed1cd79966f0fbb7fd6c366e85"} Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.822285 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.951927 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca405ab9-f0ea-489f-bcec-8e6e686e66af-config-volume\") pod \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.952035 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxht5\" (UniqueName: \"kubernetes.io/projected/ca405ab9-f0ea-489f-bcec-8e6e686e66af-kube-api-access-rxht5\") pod \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.952130 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca405ab9-f0ea-489f-bcec-8e6e686e66af-secret-volume\") pod \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\" (UID: \"ca405ab9-f0ea-489f-bcec-8e6e686e66af\") " Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.953592 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca405ab9-f0ea-489f-bcec-8e6e686e66af-config-volume" (OuterVolumeSpecName: "config-volume") pod "ca405ab9-f0ea-489f-bcec-8e6e686e66af" (UID: "ca405ab9-f0ea-489f-bcec-8e6e686e66af"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.960240 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca405ab9-f0ea-489f-bcec-8e6e686e66af-kube-api-access-rxht5" (OuterVolumeSpecName: "kube-api-access-rxht5") pod "ca405ab9-f0ea-489f-bcec-8e6e686e66af" (UID: "ca405ab9-f0ea-489f-bcec-8e6e686e66af"). InnerVolumeSpecName "kube-api-access-rxht5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:45:02 crc kubenswrapper[4995]: I0120 16:45:02.970705 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca405ab9-f0ea-489f-bcec-8e6e686e66af-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ca405ab9-f0ea-489f-bcec-8e6e686e66af" (UID: "ca405ab9-f0ea-489f-bcec-8e6e686e66af"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:45:03 crc kubenswrapper[4995]: I0120 16:45:03.053696 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca405ab9-f0ea-489f-bcec-8e6e686e66af-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 16:45:03 crc kubenswrapper[4995]: I0120 16:45:03.053733 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca405ab9-f0ea-489f-bcec-8e6e686e66af-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 16:45:03 crc kubenswrapper[4995]: I0120 16:45:03.053743 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxht5\" (UniqueName: \"kubernetes.io/projected/ca405ab9-f0ea-489f-bcec-8e6e686e66af-kube-api-access-rxht5\") on node \"crc\" DevicePath \"\"" Jan 20 16:45:03 crc kubenswrapper[4995]: I0120 16:45:03.545336 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" event={"ID":"ca405ab9-f0ea-489f-bcec-8e6e686e66af","Type":"ContainerDied","Data":"5bef3b3d47fd1cac6c51b9e2fe55efd2670315364d79957afd9870ee24917d1a"} Jan 20 16:45:03 crc kubenswrapper[4995]: I0120 16:45:03.545657 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bef3b3d47fd1cac6c51b9e2fe55efd2670315364d79957afd9870ee24917d1a" Jan 20 16:45:03 crc kubenswrapper[4995]: I0120 16:45:03.545390 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7" Jan 20 16:45:06 crc kubenswrapper[4995]: I0120 16:45:06.562601 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" event={"ID":"8b17b582-a06b-4ece-b513-7f826c838f6f","Type":"ContainerStarted","Data":"6fbbfac314c2b3597f89bf5e6ba0a5038ff3ae2c784bb56ce156683750b76e36"} Jan 20 16:45:06 crc kubenswrapper[4995]: I0120 16:45:06.562951 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:08 crc kubenswrapper[4995]: I0120 16:45:08.580049 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" event={"ID":"7fae7627-5782-4525-ba17-4507d15764cd","Type":"ContainerStarted","Data":"f750d221bd27089d94965a6fb773b8a2fc5253d0665c164307d81643419644da"} Jan 20 16:45:08 crc kubenswrapper[4995]: I0120 16:45:08.580641 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:08 crc kubenswrapper[4995]: I0120 16:45:08.600569 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" podStartSLOduration=2.284976971 podStartE2EDuration="7.600549936s" podCreationTimestamp="2026-01-20 16:45:01 +0000 UTC" firstStartedPulling="2026-01-20 16:45:02.320111095 +0000 UTC m=+820.564715901" lastFinishedPulling="2026-01-20 16:45:07.63568405 +0000 UTC m=+825.880288866" observedRunningTime="2026-01-20 16:45:08.59847782 +0000 UTC m=+826.843082636" watchObservedRunningTime="2026-01-20 16:45:08.600549936 +0000 UTC m=+826.845154752" Jan 20 16:45:08 crc kubenswrapper[4995]: I0120 16:45:08.602410 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" podStartSLOduration=3.803447794 podStartE2EDuration="7.602400816s" podCreationTimestamp="2026-01-20 16:45:01 +0000 UTC" firstStartedPulling="2026-01-20 16:45:02.088827975 +0000 UTC m=+820.333432791" lastFinishedPulling="2026-01-20 16:45:05.887781007 +0000 UTC m=+824.132385813" observedRunningTime="2026-01-20 16:45:06.584187026 +0000 UTC m=+824.828791832" watchObservedRunningTime="2026-01-20 16:45:08.602400816 +0000 UTC m=+826.847005642" Jan 20 16:45:22 crc kubenswrapper[4995]: I0120 16:45:22.109394 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5c6d4b5599-f8tsv" Jan 20 16:45:41 crc kubenswrapper[4995]: I0120 16:45:41.803481 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6dd7779458-w2rt4" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.592564 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-wbmjs"] Jan 20 16:45:42 crc kubenswrapper[4995]: E0120 16:45:42.593063 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca405ab9-f0ea-489f-bcec-8e6e686e66af" containerName="collect-profiles" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.593211 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca405ab9-f0ea-489f-bcec-8e6e686e66af" containerName="collect-profiles" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.593433 4995 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="ca405ab9-f0ea-489f-bcec-8e6e686e66af" containerName="collect-profiles" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.596069 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: W0120 16:45:42.598120 4995 reflector.go:561] object-"metallb-system"/"frr-k8s-daemon-dockercfg-5n7mt": failed to list *v1.Secret: secrets "frr-k8s-daemon-dockercfg-5n7mt" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object Jan 20 16:45:42 crc kubenswrapper[4995]: E0120 16:45:42.598357 4995 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"frr-k8s-daemon-dockercfg-5n7mt\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"frr-k8s-daemon-dockercfg-5n7mt\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.602267 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.605135 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk"] Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.606134 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.607712 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.614545 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.624385 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk"] Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.696777 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-cgq47"] Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.697755 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.704220 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-tlwj8" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.707769 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.707827 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.707880 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.734538 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-4qn5w"] Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.735452 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.741310 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.752541 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-4qn5w"] Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.759516 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-sockets\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.759746 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj7bv\" (UniqueName: \"kubernetes.io/projected/eb77594b-535f-4b63-967f-05cd3314ceb9-kube-api-access-sj7bv\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.759823 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4cc8\" (UniqueName: \"kubernetes.io/projected/5606415b-e263-4896-90b7-62fab9ff9d6a-kube-api-access-v4cc8\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.759927 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-metrics-certs\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.760030 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.764281 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g9zq\" (UniqueName: \"kubernetes.io/projected/473c4019-d6be-4420-a678-d18999ddbe1c-kube-api-access-7g9zq\") pod \"frr-k8s-webhook-server-7df86c4f6c-zwjrk\" (UID: \"473c4019-d6be-4420-a678-d18999ddbe1c\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767374 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-metallb-excludel2\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767431 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5606415b-e263-4896-90b7-62fab9ff9d6a-metrics-certs\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " 
pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767462 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-reloader\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767496 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/473c4019-d6be-4420-a678-d18999ddbe1c-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-zwjrk\" (UID: \"473c4019-d6be-4420-a678-d18999ddbe1c\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767537 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb77594b-535f-4b63-967f-05cd3314ceb9-metrics-certs\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767608 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-conf\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767679 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npqtd\" (UniqueName: \"kubernetes.io/projected/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-kube-api-access-npqtd\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767701 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/eb77594b-535f-4b63-967f-05cd3314ceb9-cert\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767737 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-startup\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.767758 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-metrics\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868613 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-metrics-certs\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868661 4995 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868686 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g9zq\" (UniqueName: \"kubernetes.io/projected/473c4019-d6be-4420-a678-d18999ddbe1c-kube-api-access-7g9zq\") pod \"frr-k8s-webhook-server-7df86c4f6c-zwjrk\" (UID: \"473c4019-d6be-4420-a678-d18999ddbe1c\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868714 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-metallb-excludel2\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868737 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5606415b-e263-4896-90b7-62fab9ff9d6a-metrics-certs\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868757 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-reloader\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868793 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/473c4019-d6be-4420-a678-d18999ddbe1c-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-zwjrk\" (UID: \"473c4019-d6be-4420-a678-d18999ddbe1c\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868818 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb77594b-535f-4b63-967f-05cd3314ceb9-metrics-certs\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868856 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-conf\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: E0120 16:45:42.868853 4995 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868891 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npqtd\" (UniqueName: \"kubernetes.io/projected/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-kube-api-access-npqtd\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868912 4995 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/eb77594b-535f-4b63-967f-05cd3314ceb9-cert\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: E0120 16:45:42.868931 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist podName:08829f43-9e73-4c1a-b4dc-16d2f1e01a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:45:43.368912268 +0000 UTC m=+861.613517074 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist") pod "speaker-cgq47" (UID: "08829f43-9e73-4c1a-b4dc-16d2f1e01a3b") : secret "metallb-memberlist" not found Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868961 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-metrics\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.868990 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-startup\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.869035 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-sockets\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.869120 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj7bv\" (UniqueName: \"kubernetes.io/projected/eb77594b-535f-4b63-967f-05cd3314ceb9-kube-api-access-sj7bv\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.869146 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4cc8\" (UniqueName: \"kubernetes.io/projected/5606415b-e263-4896-90b7-62fab9ff9d6a-kube-api-access-v4cc8\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.869296 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-conf\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.869754 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-sockets\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.869990 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-reloader\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.870437 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5606415b-e263-4896-90b7-62fab9ff9d6a-frr-startup\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.872911 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-metallb-excludel2\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.873235 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.883021 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/eb77594b-535f-4b63-967f-05cd3314ceb9-metrics-certs\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.883543 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/473c4019-d6be-4420-a678-d18999ddbe1c-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-zwjrk\" (UID: \"473c4019-d6be-4420-a678-d18999ddbe1c\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.883755 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5606415b-e263-4896-90b7-62fab9ff9d6a-metrics\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.888537 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-metrics-certs\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.894727 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5606415b-e263-4896-90b7-62fab9ff9d6a-metrics-certs\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.896660 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/eb77594b-535f-4b63-967f-05cd3314ceb9-cert\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.899687 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4cc8\" (UniqueName: 
\"kubernetes.io/projected/5606415b-e263-4896-90b7-62fab9ff9d6a-kube-api-access-v4cc8\") pod \"frr-k8s-wbmjs\" (UID: \"5606415b-e263-4896-90b7-62fab9ff9d6a\") " pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.908426 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npqtd\" (UniqueName: \"kubernetes.io/projected/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-kube-api-access-npqtd\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.915803 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g9zq\" (UniqueName: \"kubernetes.io/projected/473c4019-d6be-4420-a678-d18999ddbe1c-kube-api-access-7g9zq\") pod \"frr-k8s-webhook-server-7df86c4f6c-zwjrk\" (UID: \"473c4019-d6be-4420-a678-d18999ddbe1c\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:42 crc kubenswrapper[4995]: I0120 16:45:42.929106 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj7bv\" (UniqueName: \"kubernetes.io/projected/eb77594b-535f-4b63-967f-05cd3314ceb9-kube-api-access-sj7bv\") pod \"controller-6968d8fdc4-4qn5w\" (UID: \"eb77594b-535f-4b63-967f-05cd3314ceb9\") " pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.050008 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.375600 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:43 crc kubenswrapper[4995]: E0120 16:45:43.375804 4995 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 20 16:45:43 crc kubenswrapper[4995]: E0120 16:45:43.376155 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist podName:08829f43-9e73-4c1a-b4dc-16d2f1e01a3b nodeName:}" failed. No retries permitted until 2026-01-20 16:45:44.376134118 +0000 UTC m=+862.620738934 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist") pod "speaker-cgq47" (UID: "08829f43-9e73-4c1a-b4dc-16d2f1e01a3b") : secret "metallb-memberlist" not found Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.454131 4995 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5n7mt" Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.460373 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.462374 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.513566 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-4qn5w"] Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.730613 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk"] Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.816166 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"1a7c774844fae3e2747c2111ef139c6a3ae377b28078748c41850de2b4715a18"} Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.819378 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-4qn5w" event={"ID":"eb77594b-535f-4b63-967f-05cd3314ceb9","Type":"ContainerStarted","Data":"44aabbed39f1fe9af1c2bae84084114c13bbf871b4783dc89560d4389cf2c395"} Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.819416 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-4qn5w" event={"ID":"eb77594b-535f-4b63-967f-05cd3314ceb9","Type":"ContainerStarted","Data":"56b390f63abe4e510db409d8a016ec1ee7a18fe95c1248fcd418fbd14c7990b1"} Jan 20 16:45:43 crc kubenswrapper[4995]: I0120 16:45:43.820586 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" event={"ID":"473c4019-d6be-4420-a678-d18999ddbe1c","Type":"ContainerStarted","Data":"969aa0591278408ba16f141758ad8d289499e1d048691693bea24958baaae198"} Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.394326 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.405343 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/08829f43-9e73-4c1a-b4dc-16d2f1e01a3b-memberlist\") pod \"speaker-cgq47\" (UID: \"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b\") " pod="metallb-system/speaker-cgq47" Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.513555 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-cgq47" Jan 20 16:45:44 crc kubenswrapper[4995]: W0120 16:45:44.537705 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08829f43_9e73_4c1a_b4dc_16d2f1e01a3b.slice/crio-e1b248fbf2642ae27a55dbc4e48514945dd7758084263c5ca81bdf59da77562e WatchSource:0}: Error finding container e1b248fbf2642ae27a55dbc4e48514945dd7758084263c5ca81bdf59da77562e: Status 404 returned error can't find the container with id e1b248fbf2642ae27a55dbc4e48514945dd7758084263c5ca81bdf59da77562e Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.830728 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-cgq47" event={"ID":"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b","Type":"ContainerStarted","Data":"f79fa601330716db2dd088e26875fc1bbfb88b70535893c4b3cbdd6e75e35bc5"} Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.830778 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-cgq47" event={"ID":"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b","Type":"ContainerStarted","Data":"e1b248fbf2642ae27a55dbc4e48514945dd7758084263c5ca81bdf59da77562e"} Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.836241 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-4qn5w" event={"ID":"eb77594b-535f-4b63-967f-05cd3314ceb9","Type":"ContainerStarted","Data":"d8a64344ad3ef90c05ea54cc4e923671e42813fe0f46b122f58a794b49442572"} Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.837324 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:44 crc kubenswrapper[4995]: I0120 16:45:44.861702 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-4qn5w" podStartSLOduration=2.861683399 podStartE2EDuration="2.861683399s" podCreationTimestamp="2026-01-20 16:45:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:45:44.860147036 +0000 UTC m=+863.104751862" watchObservedRunningTime="2026-01-20 16:45:44.861683399 +0000 UTC m=+863.106288205" Jan 20 16:45:45 crc kubenswrapper[4995]: I0120 16:45:45.862448 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-cgq47" event={"ID":"08829f43-9e73-4c1a-b4dc-16d2f1e01a3b","Type":"ContainerStarted","Data":"8b54f1d99a11c7072a7f3d28690904694a05e6e2756af6c181cecf5c9c17fc13"} Jan 20 16:45:45 crc kubenswrapper[4995]: I0120 16:45:45.862523 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-cgq47" Jan 20 16:45:45 crc kubenswrapper[4995]: I0120 16:45:45.906308 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-cgq47" podStartSLOduration=3.906292835 podStartE2EDuration="3.906292835s" podCreationTimestamp="2026-01-20 16:45:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:45:45.902644656 +0000 UTC m=+864.147249472" watchObservedRunningTime="2026-01-20 16:45:45.906292835 +0000 UTC m=+864.150897641" Jan 20 16:45:52 crc kubenswrapper[4995]: I0120 16:45:52.920602 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" 
event={"ID":"473c4019-d6be-4420-a678-d18999ddbe1c","Type":"ContainerStarted","Data":"ca2d5886208a0457e05127313d53cc23bf9d8338c56a8c37a34294486471d6c8"} Jan 20 16:45:52 crc kubenswrapper[4995]: I0120 16:45:52.921283 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:45:52 crc kubenswrapper[4995]: I0120 16:45:52.944661 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" podStartSLOduration=3.0104328909999998 podStartE2EDuration="10.944642924s" podCreationTimestamp="2026-01-20 16:45:42 +0000 UTC" firstStartedPulling="2026-01-20 16:45:43.742365186 +0000 UTC m=+861.986969992" lastFinishedPulling="2026-01-20 16:45:51.676575179 +0000 UTC m=+869.921180025" observedRunningTime="2026-01-20 16:45:52.94082194 +0000 UTC m=+871.185426756" watchObservedRunningTime="2026-01-20 16:45:52.944642924 +0000 UTC m=+871.189247730" Jan 20 16:45:53 crc kubenswrapper[4995]: I0120 16:45:53.054350 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-4qn5w" Jan 20 16:45:54 crc kubenswrapper[4995]: I0120 16:45:54.520437 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-cgq47" Jan 20 16:45:56 crc kubenswrapper[4995]: I0120 16:45:56.949257 4995 generic.go:334] "Generic (PLEG): container finished" podID="5606415b-e263-4896-90b7-62fab9ff9d6a" containerID="870a5c1146ab024913300856bbc93e4811d8cf6e2fafa37460a404436f89ea3e" exitCode=0 Jan 20 16:45:56 crc kubenswrapper[4995]: I0120 16:45:56.949346 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerDied","Data":"870a5c1146ab024913300856bbc93e4811d8cf6e2fafa37460a404436f89ea3e"} Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.574523 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8j96v"] Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.575296 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.577714 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.577714 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.577971 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-kdxcx" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.589190 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8j96v"] Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.678026 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bnsd\" (UniqueName: \"kubernetes.io/projected/59c05da0-9fcd-4f4b-9d26-c07d24322f69-kube-api-access-5bnsd\") pod \"openstack-operator-index-8j96v\" (UID: \"59c05da0-9fcd-4f4b-9d26-c07d24322f69\") " pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.779514 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bnsd\" (UniqueName: \"kubernetes.io/projected/59c05da0-9fcd-4f4b-9d26-c07d24322f69-kube-api-access-5bnsd\") pod \"openstack-operator-index-8j96v\" (UID: \"59c05da0-9fcd-4f4b-9d26-c07d24322f69\") " pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.797716 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bnsd\" (UniqueName: \"kubernetes.io/projected/59c05da0-9fcd-4f4b-9d26-c07d24322f69-kube-api-access-5bnsd\") pod \"openstack-operator-index-8j96v\" (UID: \"59c05da0-9fcd-4f4b-9d26-c07d24322f69\") " pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.911004 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.960945 4995 generic.go:334] "Generic (PLEG): container finished" podID="5606415b-e263-4896-90b7-62fab9ff9d6a" containerID="7c18a68161bd2c5658ba89312867a130075711931ea8f21fcaabdb159cb88ee6" exitCode=0 Jan 20 16:45:57 crc kubenswrapper[4995]: I0120 16:45:57.960989 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerDied","Data":"7c18a68161bd2c5658ba89312867a130075711931ea8f21fcaabdb159cb88ee6"} Jan 20 16:45:58 crc kubenswrapper[4995]: I0120 16:45:58.354532 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8j96v"] Jan 20 16:45:58 crc kubenswrapper[4995]: W0120 16:45:58.365795 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59c05da0_9fcd_4f4b_9d26_c07d24322f69.slice/crio-e02503c53868f43bbe214636b8a25ba50d6b6d4aebbbdc4b12054649aa1b35aa WatchSource:0}: Error finding container e02503c53868f43bbe214636b8a25ba50d6b6d4aebbbdc4b12054649aa1b35aa: Status 404 returned error can't find the container with id e02503c53868f43bbe214636b8a25ba50d6b6d4aebbbdc4b12054649aa1b35aa Jan 20 16:45:58 crc kubenswrapper[4995]: I0120 16:45:58.967964 4995 generic.go:334] "Generic (PLEG): container finished" podID="5606415b-e263-4896-90b7-62fab9ff9d6a" containerID="0cd322e71cc83a62338082b71cd6a907d7a7534dd27b41e06e4cb0a621d9fffa" exitCode=0 Jan 20 16:45:58 crc kubenswrapper[4995]: I0120 16:45:58.968392 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerDied","Data":"0cd322e71cc83a62338082b71cd6a907d7a7534dd27b41e06e4cb0a621d9fffa"} Jan 20 16:45:58 crc kubenswrapper[4995]: I0120 16:45:58.970522 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8j96v" event={"ID":"59c05da0-9fcd-4f4b-9d26-c07d24322f69","Type":"ContainerStarted","Data":"e02503c53868f43bbe214636b8a25ba50d6b6d4aebbbdc4b12054649aa1b35aa"} Jan 20 16:45:59 crc kubenswrapper[4995]: I0120 16:45:59.979892 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"52b792a03a845d918555906dc56af3837224608398fb16e12653e10049f66d15"} Jan 20 16:45:59 crc kubenswrapper[4995]: I0120 16:45:59.980417 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"27b941f2c19454132c5b20700988837d936b9d5a7c4fa1c459fbc69b2c5ef066"} Jan 20 16:45:59 crc kubenswrapper[4995]: I0120 16:45:59.980436 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"4d7cebf479c35f37c51f9da5f50213aff9609b4ffa7e4b3925ad2bb5e0b6b1db"} Jan 20 16:46:00 crc kubenswrapper[4995]: I0120 16:46:00.947469 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8j96v"] Jan 20 16:46:00 crc kubenswrapper[4995]: I0120 16:46:00.988432 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" 
event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"d7c0d1e16b92c777c529cd16052be8ebcc3324cc97d8980b8d0531f13181a61c"} Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.556486 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-r87vf"] Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.557558 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.562433 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r87vf"] Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.689510 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdb42\" (UniqueName: \"kubernetes.io/projected/3e43abde-a2a7-4334-a3a2-7859aad1a87b-kube-api-access-sdb42\") pod \"openstack-operator-index-r87vf\" (UID: \"3e43abde-a2a7-4334-a3a2-7859aad1a87b\") " pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.790364 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdb42\" (UniqueName: \"kubernetes.io/projected/3e43abde-a2a7-4334-a3a2-7859aad1a87b-kube-api-access-sdb42\") pod \"openstack-operator-index-r87vf\" (UID: \"3e43abde-a2a7-4334-a3a2-7859aad1a87b\") " pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.807355 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdb42\" (UniqueName: \"kubernetes.io/projected/3e43abde-a2a7-4334-a3a2-7859aad1a87b-kube-api-access-sdb42\") pod \"openstack-operator-index-r87vf\" (UID: \"3e43abde-a2a7-4334-a3a2-7859aad1a87b\") " pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:01 crc kubenswrapper[4995]: I0120 16:46:01.915779 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.041272 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"242faefc7bfb9d1c9c9198955d2b1e1ab8cda304c27770899b4f7e3c3323b3ff"} Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.041600 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-wbmjs" event={"ID":"5606415b-e263-4896-90b7-62fab9ff9d6a","Type":"ContainerStarted","Data":"91867d968fe30a894526f3624d79b3eee33a9870451efa96ff5bec0d28b266ee"} Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.041660 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.043200 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8j96v" event={"ID":"59c05da0-9fcd-4f4b-9d26-c07d24322f69","Type":"ContainerStarted","Data":"732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d"} Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.043287 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-8j96v" podUID="59c05da0-9fcd-4f4b-9d26-c07d24322f69" containerName="registry-server" containerID="cri-o://732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d" gracePeriod=2 Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.065731 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-wbmjs" podStartSLOduration=7.727742539 podStartE2EDuration="20.065715199s" podCreationTimestamp="2026-01-20 16:45:42 +0000 UTC" firstStartedPulling="2026-01-20 16:45:43.636774633 +0000 UTC m=+861.881379439" lastFinishedPulling="2026-01-20 16:45:55.974747283 +0000 UTC m=+874.219352099" observedRunningTime="2026-01-20 16:46:02.061309939 +0000 UTC m=+880.305914775" watchObservedRunningTime="2026-01-20 16:46:02.065715199 +0000 UTC m=+880.310320005" Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.084879 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8j96v" podStartSLOduration=1.976936958 podStartE2EDuration="5.084827557s" podCreationTimestamp="2026-01-20 16:45:57 +0000 UTC" firstStartedPulling="2026-01-20 16:45:58.371252429 +0000 UTC m=+876.615857245" lastFinishedPulling="2026-01-20 16:46:01.479143038 +0000 UTC m=+879.723747844" observedRunningTime="2026-01-20 16:46:02.080241853 +0000 UTC m=+880.324846659" watchObservedRunningTime="2026-01-20 16:46:02.084827557 +0000 UTC m=+880.329432363" Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.118506 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r87vf"] Jan 20 16:46:02 crc kubenswrapper[4995]: W0120 16:46:02.134506 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e43abde_a2a7_4334_a3a2_7859aad1a87b.slice/crio-121e54ceaae1ca0b093b183f693bdd33e07616252b1e977507968c8819803057 WatchSource:0}: Error finding container 121e54ceaae1ca0b093b183f693bdd33e07616252b1e977507968c8819803057: Status 404 returned error can't find the container with id 121e54ceaae1ca0b093b183f693bdd33e07616252b1e977507968c8819803057 Jan 20 16:46:02 crc 
kubenswrapper[4995]: I0120 16:46:02.351559 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.498206 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bnsd\" (UniqueName: \"kubernetes.io/projected/59c05da0-9fcd-4f4b-9d26-c07d24322f69-kube-api-access-5bnsd\") pod \"59c05da0-9fcd-4f4b-9d26-c07d24322f69\" (UID: \"59c05da0-9fcd-4f4b-9d26-c07d24322f69\") " Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.504439 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59c05da0-9fcd-4f4b-9d26-c07d24322f69-kube-api-access-5bnsd" (OuterVolumeSpecName: "kube-api-access-5bnsd") pod "59c05da0-9fcd-4f4b-9d26-c07d24322f69" (UID: "59c05da0-9fcd-4f4b-9d26-c07d24322f69"). InnerVolumeSpecName "kube-api-access-5bnsd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:46:02 crc kubenswrapper[4995]: I0120 16:46:02.599287 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bnsd\" (UniqueName: \"kubernetes.io/projected/59c05da0-9fcd-4f4b-9d26-c07d24322f69-kube-api-access-5bnsd\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.052479 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r87vf" event={"ID":"3e43abde-a2a7-4334-a3a2-7859aad1a87b","Type":"ContainerStarted","Data":"142a8f2c40d7bf3e120a829fe34cff4ad9251800d43dd84fbd981560d8f90dd2"} Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.052616 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r87vf" event={"ID":"3e43abde-a2a7-4334-a3a2-7859aad1a87b","Type":"ContainerStarted","Data":"121e54ceaae1ca0b093b183f693bdd33e07616252b1e977507968c8819803057"} Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.055181 4995 generic.go:334] "Generic (PLEG): container finished" podID="59c05da0-9fcd-4f4b-9d26-c07d24322f69" containerID="732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d" exitCode=0 Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.055238 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8j96v" event={"ID":"59c05da0-9fcd-4f4b-9d26-c07d24322f69","Type":"ContainerDied","Data":"732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d"} Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.055270 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8j96v" event={"ID":"59c05da0-9fcd-4f4b-9d26-c07d24322f69","Type":"ContainerDied","Data":"e02503c53868f43bbe214636b8a25ba50d6b6d4aebbbdc4b12054649aa1b35aa"} Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.055292 4995 scope.go:117] "RemoveContainer" containerID="732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.055287 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-8j96v" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.077619 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-r87vf" podStartSLOduration=2.024040027 podStartE2EDuration="2.077592639s" podCreationTimestamp="2026-01-20 16:46:01 +0000 UTC" firstStartedPulling="2026-01-20 16:46:02.138761379 +0000 UTC m=+880.383366185" lastFinishedPulling="2026-01-20 16:46:02.192313991 +0000 UTC m=+880.436918797" observedRunningTime="2026-01-20 16:46:03.069091938 +0000 UTC m=+881.313696744" watchObservedRunningTime="2026-01-20 16:46:03.077592639 +0000 UTC m=+881.322197445" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.088407 4995 scope.go:117] "RemoveContainer" containerID="732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d" Jan 20 16:46:03 crc kubenswrapper[4995]: E0120 16:46:03.089404 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d\": container with ID starting with 732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d not found: ID does not exist" containerID="732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.089472 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d"} err="failed to get container status \"732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d\": rpc error: code = NotFound desc = could not find container \"732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d\": container with ID starting with 732a2d31f51429e1bd0e8dd4ed9b03aebdfa9d24f90fe85c4813f7996c7baa0d not found: ID does not exist" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.110838 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8j96v"] Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.116779 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-8j96v"] Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.462938 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.467806 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-zwjrk" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.528723 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:46:03 crc kubenswrapper[4995]: I0120 16:46:03.998187 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59c05da0-9fcd-4f4b-9d26-c07d24322f69" path="/var/lib/kubelet/pods/59c05da0-9fcd-4f4b-9d26-c07d24322f69/volumes" Jan 20 16:46:11 crc kubenswrapper[4995]: I0120 16:46:11.916933 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:11 crc kubenswrapper[4995]: I0120 16:46:11.919552 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:11 crc kubenswrapper[4995]: I0120 16:46:11.962944 4995 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:12 crc kubenswrapper[4995]: I0120 16:46:12.156556 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-r87vf" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.465655 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-wbmjs" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.771307 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-blxbq"] Jan 20 16:46:13 crc kubenswrapper[4995]: E0120 16:46:13.772134 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59c05da0-9fcd-4f4b-9d26-c07d24322f69" containerName="registry-server" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.772151 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="59c05da0-9fcd-4f4b-9d26-c07d24322f69" containerName="registry-server" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.772366 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="59c05da0-9fcd-4f4b-9d26-c07d24322f69" containerName="registry-server" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.776117 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.782902 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-blxbq"] Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.850307 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-utilities\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.850342 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg5gp\" (UniqueName: \"kubernetes.io/projected/dece44ac-3c5f-414a-8114-2ac8faa9505c-kube-api-access-mg5gp\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.850462 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-catalog-content\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.953475 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-utilities\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.953571 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg5gp\" (UniqueName: \"kubernetes.io/projected/dece44ac-3c5f-414a-8114-2ac8faa9505c-kube-api-access-mg5gp\") pod \"redhat-marketplace-blxbq\" 
(UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.953778 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-catalog-content\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.954707 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-catalog-content\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.955566 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-utilities\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:13 crc kubenswrapper[4995]: I0120 16:46:13.979445 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg5gp\" (UniqueName: \"kubernetes.io/projected/dece44ac-3c5f-414a-8114-2ac8faa9505c-kube-api-access-mg5gp\") pod \"redhat-marketplace-blxbq\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:14 crc kubenswrapper[4995]: I0120 16:46:14.101177 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:14 crc kubenswrapper[4995]: I0120 16:46:14.518604 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-blxbq"] Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.142967 4995 generic.go:334] "Generic (PLEG): container finished" podID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerID="a21fd7b342dfa079c48ea21e804af85364be56d862b5158b1ac99a2231d9bce8" exitCode=0 Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.143053 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerDied","Data":"a21fd7b342dfa079c48ea21e804af85364be56d862b5158b1ac99a2231d9bce8"} Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.143318 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerStarted","Data":"3806a349d4c01367b09eb359adc35ecbe70febccba153b2b3a05b99cd358b8db"} Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.756594 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mwhpn"] Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.758695 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.783033 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mwhpn"] Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.783924 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhjpp\" (UniqueName: \"kubernetes.io/projected/53d3bd3d-a4a1-434c-804f-c8e031042589-kube-api-access-lhjpp\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.784260 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-utilities\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.784326 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-catalog-content\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.885690 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhjpp\" (UniqueName: \"kubernetes.io/projected/53d3bd3d-a4a1-434c-804f-c8e031042589-kube-api-access-lhjpp\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.885912 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-utilities\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.885946 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-catalog-content\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.886928 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-catalog-content\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.887479 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-utilities\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:15 crc kubenswrapper[4995]: I0120 16:46:15.911572 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lhjpp\" (UniqueName: \"kubernetes.io/projected/53d3bd3d-a4a1-434c-804f-c8e031042589-kube-api-access-lhjpp\") pod \"community-operators-mwhpn\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:16 crc kubenswrapper[4995]: I0120 16:46:16.106014 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:16 crc kubenswrapper[4995]: I0120 16:46:16.170534 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerStarted","Data":"19ca482c17346574359ef19492da30179386b3456403dd9a17aeb57950684c38"} Jan 20 16:46:16 crc kubenswrapper[4995]: I0120 16:46:16.645039 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mwhpn"] Jan 20 16:46:16 crc kubenswrapper[4995]: W0120 16:46:16.650997 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53d3bd3d_a4a1_434c_804f_c8e031042589.slice/crio-7b9adf8717701667c370ced89ab3decdb8e95d0c2886bc84f7b9863ef6a82416 WatchSource:0}: Error finding container 7b9adf8717701667c370ced89ab3decdb8e95d0c2886bc84f7b9863ef6a82416: Status 404 returned error can't find the container with id 7b9adf8717701667c370ced89ab3decdb8e95d0c2886bc84f7b9863ef6a82416 Jan 20 16:46:17 crc kubenswrapper[4995]: I0120 16:46:17.180696 4995 generic.go:334] "Generic (PLEG): container finished" podID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerID="19ca482c17346574359ef19492da30179386b3456403dd9a17aeb57950684c38" exitCode=0 Jan 20 16:46:17 crc kubenswrapper[4995]: I0120 16:46:17.182837 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerDied","Data":"19ca482c17346574359ef19492da30179386b3456403dd9a17aeb57950684c38"} Jan 20 16:46:17 crc kubenswrapper[4995]: I0120 16:46:17.185198 4995 generic.go:334] "Generic (PLEG): container finished" podID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerID="59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b" exitCode=0 Jan 20 16:46:17 crc kubenswrapper[4995]: I0120 16:46:17.185253 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwhpn" event={"ID":"53d3bd3d-a4a1-434c-804f-c8e031042589","Type":"ContainerDied","Data":"59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b"} Jan 20 16:46:17 crc kubenswrapper[4995]: I0120 16:46:17.185292 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwhpn" event={"ID":"53d3bd3d-a4a1-434c-804f-c8e031042589","Type":"ContainerStarted","Data":"7b9adf8717701667c370ced89ab3decdb8e95d0c2886bc84f7b9863ef6a82416"} Jan 20 16:46:18 crc kubenswrapper[4995]: I0120 16:46:18.196070 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerStarted","Data":"55ab3b56e4b461452ae8527762468349c7e3d944e23d3b672478de1daa1d8eda"} Jan 20 16:46:18 crc kubenswrapper[4995]: I0120 16:46:18.218602 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-blxbq" podStartSLOduration=2.501750133 
podStartE2EDuration="5.218582652s" podCreationTimestamp="2026-01-20 16:46:13 +0000 UTC" firstStartedPulling="2026-01-20 16:46:15.145885147 +0000 UTC m=+893.390489963" lastFinishedPulling="2026-01-20 16:46:17.862717656 +0000 UTC m=+896.107322482" observedRunningTime="2026-01-20 16:46:18.216252779 +0000 UTC m=+896.460857585" watchObservedRunningTime="2026-01-20 16:46:18.218582652 +0000 UTC m=+896.463187458" Jan 20 16:46:19 crc kubenswrapper[4995]: I0120 16:46:19.205041 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwhpn" event={"ID":"53d3bd3d-a4a1-434c-804f-c8e031042589","Type":"ContainerDied","Data":"4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f"} Jan 20 16:46:19 crc kubenswrapper[4995]: I0120 16:46:19.204978 4995 generic.go:334] "Generic (PLEG): container finished" podID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerID="4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f" exitCode=0 Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.200538 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr"] Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.202013 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.203660 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-96dpw" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.213041 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwhpn" event={"ID":"53d3bd3d-a4a1-434c-804f-c8e031042589","Type":"ContainerStarted","Data":"8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a"} Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.215322 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr"] Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.244880 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mwhpn" podStartSLOduration=2.559252489 podStartE2EDuration="5.244865521s" podCreationTimestamp="2026-01-20 16:46:15 +0000 UTC" firstStartedPulling="2026-01-20 16:46:17.186925186 +0000 UTC m=+895.431529992" lastFinishedPulling="2026-01-20 16:46:19.872538218 +0000 UTC m=+898.117143024" observedRunningTime="2026-01-20 16:46:20.241028087 +0000 UTC m=+898.485632893" watchObservedRunningTime="2026-01-20 16:46:20.244865521 +0000 UTC m=+898.489470317" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.273243 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-util\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.273309 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l64pm\" (UniqueName: \"kubernetes.io/projected/da01a294-6f73-4389-8117-a857e195a1c8-kube-api-access-l64pm\") pod 
\"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.273338 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-bundle\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.375000 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l64pm\" (UniqueName: \"kubernetes.io/projected/da01a294-6f73-4389-8117-a857e195a1c8-kube-api-access-l64pm\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.375055 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-bundle\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.375165 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-util\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.375570 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-util\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.375993 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-bundle\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.394817 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l64pm\" (UniqueName: \"kubernetes.io/projected/da01a294-6f73-4389-8117-a857e195a1c8-kube-api-access-l64pm\") pod \"ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.516249 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:20 crc kubenswrapper[4995]: I0120 16:46:20.923966 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr"] Jan 20 16:46:21 crc kubenswrapper[4995]: I0120 16:46:21.229930 4995 generic.go:334] "Generic (PLEG): container finished" podID="da01a294-6f73-4389-8117-a857e195a1c8" containerID="e234e48acfef134460ac0e8171ba7b3456b1010d865841f9f0b0679a1185b66a" exitCode=0 Jan 20 16:46:21 crc kubenswrapper[4995]: I0120 16:46:21.231281 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" event={"ID":"da01a294-6f73-4389-8117-a857e195a1c8","Type":"ContainerDied","Data":"e234e48acfef134460ac0e8171ba7b3456b1010d865841f9f0b0679a1185b66a"} Jan 20 16:46:21 crc kubenswrapper[4995]: I0120 16:46:21.242102 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" event={"ID":"da01a294-6f73-4389-8117-a857e195a1c8","Type":"ContainerStarted","Data":"99edcdc0809aab5889213fdeaceb47ee1a9cad96faa0d713d49452a50ffa5511"} Jan 20 16:46:22 crc kubenswrapper[4995]: I0120 16:46:22.250592 4995 generic.go:334] "Generic (PLEG): container finished" podID="da01a294-6f73-4389-8117-a857e195a1c8" containerID="bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496" exitCode=0 Jan 20 16:46:22 crc kubenswrapper[4995]: I0120 16:46:22.250689 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" event={"ID":"da01a294-6f73-4389-8117-a857e195a1c8","Type":"ContainerDied","Data":"bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496"} Jan 20 16:46:23 crc kubenswrapper[4995]: I0120 16:46:23.258902 4995 generic.go:334] "Generic (PLEG): container finished" podID="da01a294-6f73-4389-8117-a857e195a1c8" containerID="d24f668bea53ee59c2b1d934b474a80493a44086704caa54ea44a07d285eb36a" exitCode=0 Jan 20 16:46:23 crc kubenswrapper[4995]: I0120 16:46:23.258994 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" event={"ID":"da01a294-6f73-4389-8117-a857e195a1c8","Type":"ContainerDied","Data":"d24f668bea53ee59c2b1d934b474a80493a44086704caa54ea44a07d285eb36a"} Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.101427 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.101497 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:24 crc kubenswrapper[4995]: E0120 16:46:24.103732 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda01a294_6f73_4389_8117_a857e195a1c8.slice/crio-bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.171966 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 
16:46:24.303526 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.545635 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.651827 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-bundle\") pod \"da01a294-6f73-4389-8117-a857e195a1c8\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.651878 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-util\") pod \"da01a294-6f73-4389-8117-a857e195a1c8\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.651930 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l64pm\" (UniqueName: \"kubernetes.io/projected/da01a294-6f73-4389-8117-a857e195a1c8-kube-api-access-l64pm\") pod \"da01a294-6f73-4389-8117-a857e195a1c8\" (UID: \"da01a294-6f73-4389-8117-a857e195a1c8\") " Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.653257 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-bundle" (OuterVolumeSpecName: "bundle") pod "da01a294-6f73-4389-8117-a857e195a1c8" (UID: "da01a294-6f73-4389-8117-a857e195a1c8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.659523 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da01a294-6f73-4389-8117-a857e195a1c8-kube-api-access-l64pm" (OuterVolumeSpecName: "kube-api-access-l64pm") pod "da01a294-6f73-4389-8117-a857e195a1c8" (UID: "da01a294-6f73-4389-8117-a857e195a1c8"). InnerVolumeSpecName "kube-api-access-l64pm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.666073 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-util" (OuterVolumeSpecName: "util") pod "da01a294-6f73-4389-8117-a857e195a1c8" (UID: "da01a294-6f73-4389-8117-a857e195a1c8"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.754027 4995 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.754118 4995 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/da01a294-6f73-4389-8117-a857e195a1c8-util\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:24 crc kubenswrapper[4995]: I0120 16:46:24.754139 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l64pm\" (UniqueName: \"kubernetes.io/projected/da01a294-6f73-4389-8117-a857e195a1c8-kube-api-access-l64pm\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:25 crc kubenswrapper[4995]: I0120 16:46:25.274840 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" event={"ID":"da01a294-6f73-4389-8117-a857e195a1c8","Type":"ContainerDied","Data":"99edcdc0809aab5889213fdeaceb47ee1a9cad96faa0d713d49452a50ffa5511"} Jan 20 16:46:25 crc kubenswrapper[4995]: I0120 16:46:25.274928 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99edcdc0809aab5889213fdeaceb47ee1a9cad96faa0d713d49452a50ffa5511" Jan 20 16:46:25 crc kubenswrapper[4995]: I0120 16:46:25.274878 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr" Jan 20 16:46:26 crc kubenswrapper[4995]: I0120 16:46:26.106545 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:26 crc kubenswrapper[4995]: I0120 16:46:26.106979 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:26 crc kubenswrapper[4995]: I0120 16:46:26.170244 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:26 crc kubenswrapper[4995]: I0120 16:46:26.346526 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.463157 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd"] Jan 20 16:46:27 crc kubenswrapper[4995]: E0120 16:46:27.463398 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="pull" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.463411 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="pull" Jan 20 16:46:27 crc kubenswrapper[4995]: E0120 16:46:27.463423 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="util" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.463429 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="util" Jan 20 16:46:27 crc kubenswrapper[4995]: E0120 16:46:27.463444 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="extract" Jan 
20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.463451 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="extract" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.463555 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="da01a294-6f73-4389-8117-a857e195a1c8" containerName="extract" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.463954 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.465845 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-jqlkc" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.488737 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd"] Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.490210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqsf8\" (UniqueName: \"kubernetes.io/projected/6bd0aa66-ff4d-43ff-925d-e3ead5943058-kube-api-access-mqsf8\") pod \"openstack-operator-controller-init-5c987874f9-t2thd\" (UID: \"6bd0aa66-ff4d-43ff-925d-e3ead5943058\") " pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.545946 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mwhpn"] Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.591872 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqsf8\" (UniqueName: \"kubernetes.io/projected/6bd0aa66-ff4d-43ff-925d-e3ead5943058-kube-api-access-mqsf8\") pod \"openstack-operator-controller-init-5c987874f9-t2thd\" (UID: \"6bd0aa66-ff4d-43ff-925d-e3ead5943058\") " pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.607916 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqsf8\" (UniqueName: \"kubernetes.io/projected/6bd0aa66-ff4d-43ff-925d-e3ead5943058-kube-api-access-mqsf8\") pod \"openstack-operator-controller-init-5c987874f9-t2thd\" (UID: \"6bd0aa66-ff4d-43ff-925d-e3ead5943058\") " pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.752209 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-blxbq"] Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.752535 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-blxbq" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="registry-server" containerID="cri-o://55ab3b56e4b461452ae8527762468349c7e3d944e23d3b672478de1daa1d8eda" gracePeriod=2 Jan 20 16:46:27 crc kubenswrapper[4995]: I0120 16:46:27.783768 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.270670 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd"] Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.305847 4995 generic.go:334] "Generic (PLEG): container finished" podID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerID="55ab3b56e4b461452ae8527762468349c7e3d944e23d3b672478de1daa1d8eda" exitCode=0 Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.305924 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerDied","Data":"55ab3b56e4b461452ae8527762468349c7e3d944e23d3b672478de1daa1d8eda"} Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.306661 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" event={"ID":"6bd0aa66-ff4d-43ff-925d-e3ead5943058","Type":"ContainerStarted","Data":"b155da7743b1cbac294b83b7568e73d5cc033ad1ec5e48abe2d58beaa46ca33a"} Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.306838 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mwhpn" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="registry-server" containerID="cri-o://8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a" gracePeriod=2 Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.627230 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.723625 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.725498 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5gp\" (UniqueName: \"kubernetes.io/projected/dece44ac-3c5f-414a-8114-2ac8faa9505c-kube-api-access-mg5gp\") pod \"dece44ac-3c5f-414a-8114-2ac8faa9505c\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.725657 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-utilities\") pod \"dece44ac-3c5f-414a-8114-2ac8faa9505c\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.726605 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-utilities" (OuterVolumeSpecName: "utilities") pod "dece44ac-3c5f-414a-8114-2ac8faa9505c" (UID: "dece44ac-3c5f-414a-8114-2ac8faa9505c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.726743 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-catalog-content\") pod \"dece44ac-3c5f-414a-8114-2ac8faa9505c\" (UID: \"dece44ac-3c5f-414a-8114-2ac8faa9505c\") " Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.730511 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dece44ac-3c5f-414a-8114-2ac8faa9505c-kube-api-access-mg5gp" (OuterVolumeSpecName: "kube-api-access-mg5gp") pod "dece44ac-3c5f-414a-8114-2ac8faa9505c" (UID: "dece44ac-3c5f-414a-8114-2ac8faa9505c"). InnerVolumeSpecName "kube-api-access-mg5gp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.743727 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5gp\" (UniqueName: \"kubernetes.io/projected/dece44ac-3c5f-414a-8114-2ac8faa9505c-kube-api-access-mg5gp\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.743764 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.753205 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dece44ac-3c5f-414a-8114-2ac8faa9505c" (UID: "dece44ac-3c5f-414a-8114-2ac8faa9505c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.845043 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-catalog-content\") pod \"53d3bd3d-a4a1-434c-804f-c8e031042589\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.845206 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-utilities\") pod \"53d3bd3d-a4a1-434c-804f-c8e031042589\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.845236 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhjpp\" (UniqueName: \"kubernetes.io/projected/53d3bd3d-a4a1-434c-804f-c8e031042589-kube-api-access-lhjpp\") pod \"53d3bd3d-a4a1-434c-804f-c8e031042589\" (UID: \"53d3bd3d-a4a1-434c-804f-c8e031042589\") " Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.845822 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-utilities" (OuterVolumeSpecName: "utilities") pod "53d3bd3d-a4a1-434c-804f-c8e031042589" (UID: "53d3bd3d-a4a1-434c-804f-c8e031042589"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.846068 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dece44ac-3c5f-414a-8114-2ac8faa9505c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.846099 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.852156 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53d3bd3d-a4a1-434c-804f-c8e031042589-kube-api-access-lhjpp" (OuterVolumeSpecName: "kube-api-access-lhjpp") pod "53d3bd3d-a4a1-434c-804f-c8e031042589" (UID: "53d3bd3d-a4a1-434c-804f-c8e031042589"). InnerVolumeSpecName "kube-api-access-lhjpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.906965 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "53d3bd3d-a4a1-434c-804f-c8e031042589" (UID: "53d3bd3d-a4a1-434c-804f-c8e031042589"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.947006 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53d3bd3d-a4a1-434c-804f-c8e031042589-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:28 crc kubenswrapper[4995]: I0120 16:46:28.947037 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhjpp\" (UniqueName: \"kubernetes.io/projected/53d3bd3d-a4a1-434c-804f-c8e031042589-kube-api-access-lhjpp\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.318642 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-blxbq" event={"ID":"dece44ac-3c5f-414a-8114-2ac8faa9505c","Type":"ContainerDied","Data":"3806a349d4c01367b09eb359adc35ecbe70febccba153b2b3a05b99cd358b8db"} Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.318687 4995 scope.go:117] "RemoveContainer" containerID="55ab3b56e4b461452ae8527762468349c7e3d944e23d3b672478de1daa1d8eda" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.318789 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-blxbq" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.334362 4995 generic.go:334] "Generic (PLEG): container finished" podID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerID="8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a" exitCode=0 Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.334400 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwhpn" event={"ID":"53d3bd3d-a4a1-434c-804f-c8e031042589","Type":"ContainerDied","Data":"8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a"} Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.334422 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mwhpn" event={"ID":"53d3bd3d-a4a1-434c-804f-c8e031042589","Type":"ContainerDied","Data":"7b9adf8717701667c370ced89ab3decdb8e95d0c2886bc84f7b9863ef6a82416"} Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.334467 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mwhpn" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.349980 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-blxbq"] Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.354538 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-blxbq"] Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.367736 4995 scope.go:117] "RemoveContainer" containerID="19ca482c17346574359ef19492da30179386b3456403dd9a17aeb57950684c38" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.372046 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mwhpn"] Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.378923 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mwhpn"] Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.411551 4995 scope.go:117] "RemoveContainer" containerID="a21fd7b342dfa079c48ea21e804af85364be56d862b5158b1ac99a2231d9bce8" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.429530 4995 scope.go:117] "RemoveContainer" containerID="8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.442330 4995 scope.go:117] "RemoveContainer" containerID="4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.455396 4995 scope.go:117] "RemoveContainer" containerID="59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.488971 4995 scope.go:117] "RemoveContainer" containerID="8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a" Jan 20 16:46:29 crc kubenswrapper[4995]: E0120 16:46:29.489465 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a\": container with ID starting with 8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a not found: ID does not exist" containerID="8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.489499 4995 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a"} err="failed to get container status \"8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a\": rpc error: code = NotFound desc = could not find container \"8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a\": container with ID starting with 8346f5f1279ecbff988ac52b295a4c2251c1611c6b72425f56ebf6f18207bb0a not found: ID does not exist" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.489522 4995 scope.go:117] "RemoveContainer" containerID="4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f" Jan 20 16:46:29 crc kubenswrapper[4995]: E0120 16:46:29.489894 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f\": container with ID starting with 4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f not found: ID does not exist" containerID="4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.489916 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f"} err="failed to get container status \"4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f\": rpc error: code = NotFound desc = could not find container \"4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f\": container with ID starting with 4ee0fd5c377adf7e01e1497bfca2bd97e795b6dad439270576c9e4d8caa4907f not found: ID does not exist" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.489927 4995 scope.go:117] "RemoveContainer" containerID="59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b" Jan 20 16:46:29 crc kubenswrapper[4995]: E0120 16:46:29.490186 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b\": container with ID starting with 59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b not found: ID does not exist" containerID="59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b" Jan 20 16:46:29 crc kubenswrapper[4995]: I0120 16:46:29.490206 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b"} err="failed to get container status \"59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b\": rpc error: code = NotFound desc = could not find container \"59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b\": container with ID starting with 59ec28c45cfa01c42139e780a745cda7054804e5f8838e57570551cd5911cd7b not found: ID does not exist" Jan 20 16:46:30 crc kubenswrapper[4995]: I0120 16:46:30.030942 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" path="/var/lib/kubelet/pods/53d3bd3d-a4a1-434c-804f-c8e031042589/volumes" Jan 20 16:46:30 crc kubenswrapper[4995]: I0120 16:46:30.032455 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" path="/var/lib/kubelet/pods/dece44ac-3c5f-414a-8114-2ac8faa9505c/volumes" Jan 20 16:46:30 crc kubenswrapper[4995]: I0120 16:46:30.571515 4995 patch_prober.go:28] interesting 
pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:46:30 crc kubenswrapper[4995]: I0120 16:46:30.571574 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:46:32 crc kubenswrapper[4995]: I0120 16:46:32.359089 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" event={"ID":"6bd0aa66-ff4d-43ff-925d-e3ead5943058","Type":"ContainerStarted","Data":"d76caaeb2d3c7ca761a6e830ae341ecb56889d160c7442dbca8a889c8088e070"} Jan 20 16:46:32 crc kubenswrapper[4995]: I0120 16:46:32.359619 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:32 crc kubenswrapper[4995]: I0120 16:46:32.411695 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" podStartSLOduration=1.6872291019999999 podStartE2EDuration="5.411673232s" podCreationTimestamp="2026-01-20 16:46:27 +0000 UTC" firstStartedPulling="2026-01-20 16:46:28.278801536 +0000 UTC m=+906.523406342" lastFinishedPulling="2026-01-20 16:46:32.003245666 +0000 UTC m=+910.247850472" observedRunningTime="2026-01-20 16:46:32.406521462 +0000 UTC m=+910.651126268" watchObservedRunningTime="2026-01-20 16:46:32.411673232 +0000 UTC m=+910.656278038" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170021 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-24bds"] Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.170818 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="extract-content" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170840 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="extract-content" Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.170859 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="extract-utilities" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170870 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="extract-utilities" Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.170886 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="registry-server" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170895 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="registry-server" Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.170911 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="extract-content" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170921 4995 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="extract-content" Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.170935 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="extract-utilities" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170945 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="extract-utilities" Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.170961 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="registry-server" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.170970 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="registry-server" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.171207 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="dece44ac-3c5f-414a-8114-2ac8faa9505c" containerName="registry-server" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.171238 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="53d3bd3d-a4a1-434c-804f-c8e031042589" containerName="registry-server" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.172607 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.177220 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-24bds"] Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.264844 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2j4l\" (UniqueName: \"kubernetes.io/projected/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-kube-api-access-q2j4l\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.265110 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-catalog-content\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.265203 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-utilities\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: E0120 16:46:34.292824 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda01a294_6f73_4389_8117_a857e195a1c8.slice/crio-bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.366867 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2j4l\" (UniqueName: 
\"kubernetes.io/projected/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-kube-api-access-q2j4l\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.366948 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-catalog-content\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.366993 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-utilities\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.367610 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-utilities\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.368107 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-catalog-content\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.390417 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2j4l\" (UniqueName: \"kubernetes.io/projected/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-kube-api-access-q2j4l\") pod \"certified-operators-24bds\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.495499 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:34 crc kubenswrapper[4995]: I0120 16:46:34.749167 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-24bds"] Jan 20 16:46:35 crc kubenswrapper[4995]: I0120 16:46:35.380128 4995 generic.go:334] "Generic (PLEG): container finished" podID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerID="bae4390b3b38a8dda275fbc1c292bc8e65f46a13edf5068126b4d44b1587d776" exitCode=0 Jan 20 16:46:35 crc kubenswrapper[4995]: I0120 16:46:35.380256 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerDied","Data":"bae4390b3b38a8dda275fbc1c292bc8e65f46a13edf5068126b4d44b1587d776"} Jan 20 16:46:35 crc kubenswrapper[4995]: I0120 16:46:35.380586 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerStarted","Data":"9871328dd6f4dc441ce236dbda66fa4cc86f6b45b3b65b7c8ce719c2894f0cf5"} Jan 20 16:46:36 crc kubenswrapper[4995]: I0120 16:46:36.390766 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerStarted","Data":"36aa6570cf42f08c405801b019ee5262cf2e6a532e0c7647e7b1a97020982b54"} Jan 20 16:46:37 crc kubenswrapper[4995]: I0120 16:46:37.400229 4995 generic.go:334] "Generic (PLEG): container finished" podID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerID="36aa6570cf42f08c405801b019ee5262cf2e6a532e0c7647e7b1a97020982b54" exitCode=0 Jan 20 16:46:37 crc kubenswrapper[4995]: I0120 16:46:37.400350 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerDied","Data":"36aa6570cf42f08c405801b019ee5262cf2e6a532e0c7647e7b1a97020982b54"} Jan 20 16:46:37 crc kubenswrapper[4995]: I0120 16:46:37.786841 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-5c987874f9-t2thd" Jan 20 16:46:38 crc kubenswrapper[4995]: I0120 16:46:38.416733 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerStarted","Data":"93608cec42d79e876e2223b16b643978cabdf4a5db3f2880874abbb916946198"} Jan 20 16:46:38 crc kubenswrapper[4995]: I0120 16:46:38.455160 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-24bds" podStartSLOduration=1.985261589 podStartE2EDuration="4.455138545s" podCreationTimestamp="2026-01-20 16:46:34 +0000 UTC" firstStartedPulling="2026-01-20 16:46:35.382337049 +0000 UTC m=+913.626941865" lastFinishedPulling="2026-01-20 16:46:37.852214015 +0000 UTC m=+916.096818821" observedRunningTime="2026-01-20 16:46:38.451189118 +0000 UTC m=+916.695793924" watchObservedRunningTime="2026-01-20 16:46:38.455138545 +0000 UTC m=+916.699743351" Jan 20 16:46:44 crc kubenswrapper[4995]: E0120 16:46:44.449526 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda01a294_6f73_4389_8117_a857e195a1c8.slice/crio-bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:46:44 crc kubenswrapper[4995]: I0120 16:46:44.495738 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:44 crc kubenswrapper[4995]: I0120 16:46:44.495779 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:44 crc kubenswrapper[4995]: I0120 16:46:44.545204 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:45 crc kubenswrapper[4995]: I0120 16:46:45.511337 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:45 crc kubenswrapper[4995]: I0120 16:46:45.552375 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-24bds"] Jan 20 16:46:47 crc kubenswrapper[4995]: I0120 16:46:47.484767 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-24bds" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="registry-server" containerID="cri-o://93608cec42d79e876e2223b16b643978cabdf4a5db3f2880874abbb916946198" gracePeriod=2 Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.537601 4995 generic.go:334] "Generic (PLEG): container finished" podID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerID="93608cec42d79e876e2223b16b643978cabdf4a5db3f2880874abbb916946198" exitCode=0 Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.537680 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerDied","Data":"93608cec42d79e876e2223b16b643978cabdf4a5db3f2880874abbb916946198"} Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.784787 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.874267 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2j4l\" (UniqueName: \"kubernetes.io/projected/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-kube-api-access-q2j4l\") pod \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.874318 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-utilities\") pod \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.874412 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-catalog-content\") pod \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\" (UID: \"5d83c85c-c6b4-4826-afb0-9b6ae76414ab\") " Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.881128 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-utilities" (OuterVolumeSpecName: "utilities") pod "5d83c85c-c6b4-4826-afb0-9b6ae76414ab" (UID: "5d83c85c-c6b4-4826-afb0-9b6ae76414ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.894213 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-kube-api-access-q2j4l" (OuterVolumeSpecName: "kube-api-access-q2j4l") pod "5d83c85c-c6b4-4826-afb0-9b6ae76414ab" (UID: "5d83c85c-c6b4-4826-afb0-9b6ae76414ab"). InnerVolumeSpecName "kube-api-access-q2j4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.917442 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d83c85c-c6b4-4826-afb0-9b6ae76414ab" (UID: "5d83c85c-c6b4-4826-afb0-9b6ae76414ab"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.976447 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.976694 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2j4l\" (UniqueName: \"kubernetes.io/projected/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-kube-api-access-q2j4l\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:49 crc kubenswrapper[4995]: I0120 16:46:49.976769 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d83c85c-c6b4-4826-afb0-9b6ae76414ab-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.545474 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-24bds" event={"ID":"5d83c85c-c6b4-4826-afb0-9b6ae76414ab","Type":"ContainerDied","Data":"9871328dd6f4dc441ce236dbda66fa4cc86f6b45b3b65b7c8ce719c2894f0cf5"} Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.545544 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-24bds" Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.545550 4995 scope.go:117] "RemoveContainer" containerID="93608cec42d79e876e2223b16b643978cabdf4a5db3f2880874abbb916946198" Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.560970 4995 scope.go:117] "RemoveContainer" containerID="36aa6570cf42f08c405801b019ee5262cf2e6a532e0c7647e7b1a97020982b54" Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.562046 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-24bds"] Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.566111 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-24bds"] Jan 20 16:46:50 crc kubenswrapper[4995]: I0120 16:46:50.590536 4995 scope.go:117] "RemoveContainer" containerID="bae4390b3b38a8dda275fbc1c292bc8e65f46a13edf5068126b4d44b1587d776" Jan 20 16:46:52 crc kubenswrapper[4995]: I0120 16:46:52.005928 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" path="/var/lib/kubelet/pods/5d83c85c-c6b4-4826-afb0-9b6ae76414ab/volumes" Jan 20 16:46:54 crc kubenswrapper[4995]: E0120 16:46:54.621397 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda01a294_6f73_4389_8117_a857e195a1c8.slice/crio-bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.647390 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch"] Jan 20 16:46:58 crc kubenswrapper[4995]: E0120 16:46:58.647977 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="extract-content" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.647993 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="extract-content" Jan 20 16:46:58 crc 
kubenswrapper[4995]: E0120 16:46:58.648017 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="extract-utilities" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.648025 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="extract-utilities" Jan 20 16:46:58 crc kubenswrapper[4995]: E0120 16:46:58.648041 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="registry-server" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.648049 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="registry-server" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.648199 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d83c85c-c6b4-4826-afb0-9b6ae76414ab" containerName="registry-server" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.648713 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.650525 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-lm2ls" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.654986 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.656236 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.658866 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-qsbbs" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.660605 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.675006 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.689869 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-mj76w"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.690824 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.696866 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-nc8m6" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.710979 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-mj76w"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.733602 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.734261 4995 util.go:30] "No sandbox for pod can be found. 
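Before admitting the new operator pods, the CPU and memory managers purge per-container pinning state left behind by the deleted catalog pod, once per container (the cpu_manager.go:410, state_mem.go:107, and memory_manager.go:354 entries above for extract-content, extract-utilities, and registry-server). A toy version of that cleanup, assuming a simplified assignments map rather than the kubelet's real checkpointed state:

package sketch

// removeStaleState drops resource assignments for pods that are no longer
// active, as the CPU/memory managers do before new pods are admitted.
func removeStaleState(assignments map[string]map[string]struct{}, active map[string]bool) (removed []string) {
	for podUID, containers := range assignments {
		if active[podUID] {
			continue
		}
		for container := range containers {
			removed = append(removed, podUID+"/"+container) // "RemoveStaleState: removing container"
		}
		delete(assignments, podUID) // "Deleted CPUSet assignment"
	}
	return removed
}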
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.743230 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-sv8bq" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.758154 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.783185 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.784274 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.787982 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-wzhcr" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.804741 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4659v\" (UniqueName: \"kubernetes.io/projected/c0a3e997-8709-444b-ae4e-8fc34b04cb6e-kube-api-access-4659v\") pod \"barbican-operator-controller-manager-7ddb5c749-7zdch\" (UID: \"c0a3e997-8709-444b-ae4e-8fc34b04cb6e\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.804825 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbhj2\" (UniqueName: \"kubernetes.io/projected/3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a-kube-api-access-qbhj2\") pod \"cinder-operator-controller-manager-9b68f5989-zgvcz\" (UID: \"3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.804911 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlh2w\" (UniqueName: \"kubernetes.io/projected/9f302bf3-1501-44cc-924c-2e5c42c0eb58-kube-api-access-tlh2w\") pod \"designate-operator-controller-manager-9f958b845-mj76w\" (UID: \"9f302bf3-1501-44cc-924c-2e5c42c0eb58\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.818153 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.840139 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.840930 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.842962 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-m8czm" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.845225 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.869129 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.869949 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.874303 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.875051 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.876430 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.876575 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-7qgmn" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.878517 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-g54fz" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.878726 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.883033 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.887137 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.887845 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.892568 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-hw8fx" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.903094 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.906467 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgtmk\" (UniqueName: \"kubernetes.io/projected/072647c8-2d0e-4716-bb29-a87e3ff5cd29-kube-api-access-lgtmk\") pod \"glance-operator-controller-manager-c6994669c-bxm9j\" (UID: \"072647c8-2d0e-4716-bb29-a87e3ff5cd29\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.906507 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbhj2\" (UniqueName: \"kubernetes.io/projected/3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a-kube-api-access-qbhj2\") pod \"cinder-operator-controller-manager-9b68f5989-zgvcz\" (UID: \"3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.906542 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6m7k\" (UniqueName: \"kubernetes.io/projected/49392c07-237b-447e-a126-f06e1cbf32a2-kube-api-access-n6m7k\") pod \"heat-operator-controller-manager-594c8c9d5d-wm2kb\" (UID: \"49392c07-237b-447e-a126-f06e1cbf32a2\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.906592 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlh2w\" (UniqueName: \"kubernetes.io/projected/9f302bf3-1501-44cc-924c-2e5c42c0eb58-kube-api-access-tlh2w\") pod \"designate-operator-controller-manager-9f958b845-mj76w\" (UID: \"9f302bf3-1501-44cc-924c-2e5c42c0eb58\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.906616 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4659v\" (UniqueName: \"kubernetes.io/projected/c0a3e997-8709-444b-ae4e-8fc34b04cb6e-kube-api-access-4659v\") pod \"barbican-operator-controller-manager-7ddb5c749-7zdch\" (UID: \"c0a3e997-8709-444b-ae4e-8fc34b04cb6e\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.930699 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbhj2\" (UniqueName: \"kubernetes.io/projected/3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a-kube-api-access-qbhj2\") pod \"cinder-operator-controller-manager-9b68f5989-zgvcz\" (UID: \"3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.936748 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlh2w\" (UniqueName: \"kubernetes.io/projected/9f302bf3-1501-44cc-924c-2e5c42c0eb58-kube-api-access-tlh2w\") 
pod \"designate-operator-controller-manager-9f958b845-mj76w\" (UID: \"9f302bf3-1501-44cc-924c-2e5c42c0eb58\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.944832 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf"] Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.965850 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4659v\" (UniqueName: \"kubernetes.io/projected/c0a3e997-8709-444b-ae4e-8fc34b04cb6e-kube-api-access-4659v\") pod \"barbican-operator-controller-manager-7ddb5c749-7zdch\" (UID: \"c0a3e997-8709-444b-ae4e-8fc34b04cb6e\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.983384 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.983946 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.984729 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" Jan 20 16:46:58 crc kubenswrapper[4995]: I0120 16:46:58.993788 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.002334 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-msrxh" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.009346 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010302 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnngw\" (UniqueName: \"kubernetes.io/projected/a3c2211e-845d-47cc-b4a5-962340b0d53c-kube-api-access-xnngw\") pod \"keystone-operator-controller-manager-767fdc4f47-dwn52\" (UID: \"a3c2211e-845d-47cc-b4a5-962340b0d53c\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010337 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgtmk\" (UniqueName: \"kubernetes.io/projected/072647c8-2d0e-4716-bb29-a87e3ff5cd29-kube-api-access-lgtmk\") pod \"glance-operator-controller-manager-c6994669c-bxm9j\" (UID: \"072647c8-2d0e-4716-bb29-a87e3ff5cd29\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010373 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcl24\" (UniqueName: \"kubernetes.io/projected/c8061771-759d-49d5-b88b-9d66f45277ac-kube-api-access-jcl24\") pod \"horizon-operator-controller-manager-77d5c5b54f-zs4nf\" (UID: \"c8061771-759d-49d5-b88b-9d66f45277ac\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010405 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6m7k\" (UniqueName: \"kubernetes.io/projected/49392c07-237b-447e-a126-f06e1cbf32a2-kube-api-access-n6m7k\") pod \"heat-operator-controller-manager-594c8c9d5d-wm2kb\" (UID: \"49392c07-237b-447e-a126-f06e1cbf32a2\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010443 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7q8t\" (UniqueName: \"kubernetes.io/projected/439ab902-28ff-48a4-81e4-93c72937e573-kube-api-access-s7q8t\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010465 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srb5m\" (UniqueName: \"kubernetes.io/projected/f4577775-2c19-495a-95e7-1638f359b533-kube-api-access-srb5m\") pod \"ironic-operator-controller-manager-78757b4889-7p5v4\" (UID: \"f4577775-2c19-495a-95e7-1638f359b533\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.010494 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.040782 4995 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.041757 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.044647 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.046219 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-2zvt7" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.077669 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6m7k\" (UniqueName: \"kubernetes.io/projected/49392c07-237b-447e-a126-f06e1cbf32a2-kube-api-access-n6m7k\") pod \"heat-operator-controller-manager-594c8c9d5d-wm2kb\" (UID: \"49392c07-237b-447e-a126-f06e1cbf32a2\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.094102 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgtmk\" (UniqueName: \"kubernetes.io/projected/072647c8-2d0e-4716-bb29-a87e3ff5cd29-kube-api-access-lgtmk\") pod \"glance-operator-controller-manager-c6994669c-bxm9j\" (UID: \"072647c8-2d0e-4716-bb29-a87e3ff5cd29\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.112576 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7q8t\" (UniqueName: \"kubernetes.io/projected/439ab902-28ff-48a4-81e4-93c72937e573-kube-api-access-s7q8t\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.112611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srb5m\" (UniqueName: \"kubernetes.io/projected/f4577775-2c19-495a-95e7-1638f359b533-kube-api-access-srb5m\") pod \"ironic-operator-controller-manager-78757b4889-7p5v4\" (UID: \"f4577775-2c19-495a-95e7-1638f359b533\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.112643 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.112698 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph7cs\" (UniqueName: \"kubernetes.io/projected/93ac6eeb-0456-4cfe-8298-b8b97d09716c-kube-api-access-ph7cs\") pod \"manila-operator-controller-manager-864f6b75bf-wjrpf\" (UID: \"93ac6eeb-0456-4cfe-8298-b8b97d09716c\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.112743 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-xnngw\" (UniqueName: \"kubernetes.io/projected/a3c2211e-845d-47cc-b4a5-962340b0d53c-kube-api-access-xnngw\") pod \"keystone-operator-controller-manager-767fdc4f47-dwn52\" (UID: \"a3c2211e-845d-47cc-b4a5-962340b0d53c\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.112795 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcl24\" (UniqueName: \"kubernetes.io/projected/c8061771-759d-49d5-b88b-9d66f45277ac-kube-api-access-jcl24\") pod \"horizon-operator-controller-manager-77d5c5b54f-zs4nf\" (UID: \"c8061771-759d-49d5-b88b-9d66f45277ac\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.114567 4995 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.114617 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert podName:439ab902-28ff-48a4-81e4-93c72937e573 nodeName:}" failed. No retries permitted until 2026-01-20 16:46:59.614603845 +0000 UTC m=+937.859208651 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert") pod "infra-operator-controller-manager-77c48c7859-zd75z" (UID: "439ab902-28ff-48a4-81e4-93c72937e573") : secret "infra-operator-webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.114981 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.147669 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcl24\" (UniqueName: \"kubernetes.io/projected/c8061771-759d-49d5-b88b-9d66f45277ac-kube-api-access-jcl24\") pod \"horizon-operator-controller-manager-77d5c5b54f-zs4nf\" (UID: \"c8061771-759d-49d5-b88b-9d66f45277ac\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.150947 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srb5m\" (UniqueName: \"kubernetes.io/projected/f4577775-2c19-495a-95e7-1638f359b533-kube-api-access-srb5m\") pod \"ironic-operator-controller-manager-78757b4889-7p5v4\" (UID: \"f4577775-2c19-495a-95e7-1638f359b533\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.151010 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.152070 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-95tbl"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.211406 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.211895 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.222569 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-tf6vn" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.231184 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-p6kwt" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.231810 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7q8t\" (UniqueName: \"kubernetes.io/projected/439ab902-28ff-48a4-81e4-93c72937e573-kube-api-access-s7q8t\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.313311 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.314055 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.315151 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkdqb\" (UniqueName: \"kubernetes.io/projected/47ec26a3-41ca-482f-b539-c9dc32af0bb0-kube-api-access-gkdqb\") pod \"mariadb-operator-controller-manager-c87fff755-qd2nk\" (UID: \"47ec26a3-41ca-482f-b539-c9dc32af0bb0\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.315228 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph7cs\" (UniqueName: \"kubernetes.io/projected/93ac6eeb-0456-4cfe-8298-b8b97d09716c-kube-api-access-ph7cs\") pod \"manila-operator-controller-manager-864f6b75bf-wjrpf\" (UID: \"93ac6eeb-0456-4cfe-8298-b8b97d09716c\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.317467 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnngw\" (UniqueName: \"kubernetes.io/projected/a3c2211e-845d-47cc-b4a5-962340b0d53c-kube-api-access-xnngw\") pod \"keystone-operator-controller-manager-767fdc4f47-dwn52\" (UID: \"a3c2211e-845d-47cc-b4a5-962340b0d53c\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.326710 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.327851 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.343004 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-vfnh7" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.363367 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph7cs\" (UniqueName: \"kubernetes.io/projected/93ac6eeb-0456-4cfe-8298-b8b97d09716c-kube-api-access-ph7cs\") pod \"manila-operator-controller-manager-864f6b75bf-wjrpf\" (UID: \"93ac6eeb-0456-4cfe-8298-b8b97d09716c\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.370935 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.381062 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-95tbl"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.411429 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.411888 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.418936 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.421709 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqlbn\" (UniqueName: \"kubernetes.io/projected/ffe39c73-665e-4de6-afb5-2e9b93419e33-kube-api-access-bqlbn\") pod \"nova-operator-controller-manager-65849867d6-95tbl\" (UID: \"ffe39c73-665e-4de6-afb5-2e9b93419e33\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.421756 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6lpf\" (UniqueName: \"kubernetes.io/projected/86d4f806-c5e4-4ce0-a859-5e104b0d5dce-kube-api-access-n6lpf\") pod \"neutron-operator-controller-manager-cb4666565-m7p7b\" (UID: \"86d4f806-c5e4-4ce0-a859-5e104b0d5dce\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.421858 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkdqb\" (UniqueName: \"kubernetes.io/projected/47ec26a3-41ca-482f-b539-c9dc32af0bb0-kube-api-access-gkdqb\") pod \"mariadb-operator-controller-manager-c87fff755-qd2nk\" (UID: \"47ec26a3-41ca-482f-b539-c9dc32af0bb0\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.427441 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.428274 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.430723 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.430877 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-jd2df" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.442469 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.446849 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.448431 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.452134 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-hdbb9" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.453250 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.457585 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.458470 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.458812 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkdqb\" (UniqueName: \"kubernetes.io/projected/47ec26a3-41ca-482f-b539-c9dc32af0bb0-kube-api-access-gkdqb\") pod \"mariadb-operator-controller-manager-c87fff755-qd2nk\" (UID: \"47ec26a3-41ca-482f-b539-c9dc32af0bb0\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.461604 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-pmmtt" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.461725 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.462702 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.464878 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-msc6q" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.475780 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.476674 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.484392 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2mrqz" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.491015 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.507194 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.516085 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.521647 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.522803 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.523630 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf2g6\" (UniqueName: \"kubernetes.io/projected/de6fc9c2-f9a9-41fd-8cfb-b0493d823c20-kube-api-access-gf2g6\") pod \"octavia-operator-controller-manager-7fc9b76cf6-fk7x2\" (UID: \"de6fc9c2-f9a9-41fd-8cfb-b0493d823c20\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.524637 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqlbn\" (UniqueName: \"kubernetes.io/projected/ffe39c73-665e-4de6-afb5-2e9b93419e33-kube-api-access-bqlbn\") pod \"nova-operator-controller-manager-65849867d6-95tbl\" (UID: \"ffe39c73-665e-4de6-afb5-2e9b93419e33\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.524666 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6lpf\" (UniqueName: \"kubernetes.io/projected/86d4f806-c5e4-4ce0-a859-5e104b0d5dce-kube-api-access-n6lpf\") pod \"neutron-operator-controller-manager-cb4666565-m7p7b\" (UID: \"86d4f806-c5e4-4ce0-a859-5e104b0d5dce\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.525850 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.526826 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.528782 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-h6dw8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.529433 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-f8mss" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.530559 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.535360 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.543265 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.548106 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6lpf\" (UniqueName: \"kubernetes.io/projected/86d4f806-c5e4-4ce0-a859-5e104b0d5dce-kube-api-access-n6lpf\") pod \"neutron-operator-controller-manager-cb4666565-m7p7b\" (UID: \"86d4f806-c5e4-4ce0-a859-5e104b0d5dce\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.557805 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.558836 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.561005 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.561067 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fgcfd" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.561780 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.572184 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqlbn\" (UniqueName: \"kubernetes.io/projected/ffe39c73-665e-4de6-afb5-2e9b93419e33-kube-api-access-bqlbn\") pod \"nova-operator-controller-manager-65849867d6-95tbl\" (UID: \"ffe39c73-665e-4de6-afb5-2e9b93419e33\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.581682 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.601062 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.601992 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.622670 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-q5zp6" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.628888 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.628924 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbrhw\" (UniqueName: \"kubernetes.io/projected/31bd181f-39ff-4e9f-949c-8a6ed84f3f42-kube-api-access-cbrhw\") pod \"rabbitmq-cluster-operator-manager-668c99d594-jwzhh\" (UID: \"31bd181f-39ff-4e9f-949c-8a6ed84f3f42\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.628951 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.628965 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.628984 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf2g6\" (UniqueName: \"kubernetes.io/projected/de6fc9c2-f9a9-41fd-8cfb-b0493d823c20-kube-api-access-gf2g6\") pod \"octavia-operator-controller-manager-7fc9b76cf6-fk7x2\" (UID: \"de6fc9c2-f9a9-41fd-8cfb-b0493d823c20\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629004 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsp2j\" (UniqueName: \"kubernetes.io/projected/9d2f128c-9463-4735-9bf7-91bff7148887-kube-api-access-zsp2j\") pod \"ovn-operator-controller-manager-55db956ddc-vvwk8\" (UID: \"9d2f128c-9463-4735-9bf7-91bff7148887\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629029 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctnr2\" (UniqueName: \"kubernetes.io/projected/37f347f2-1ab4-4e49-9340-57a960ff8eb1-kube-api-access-ctnr2\") pod \"placement-operator-controller-manager-686df47fcb-6rn8v\" (UID: \"37f347f2-1ab4-4e49-9340-57a960ff8eb1\") " 
pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629043 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hlpd\" (UniqueName: \"kubernetes.io/projected/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-kube-api-access-6hlpd\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629136 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62x8t\" (UniqueName: \"kubernetes.io/projected/c2d307fa-2be9-4f04-8ae4-f3b55e987ceb-kube-api-access-62x8t\") pod \"telemetry-operator-controller-manager-5f8f495fcf-fdpgr\" (UID: \"c2d307fa-2be9-4f04-8ae4-f3b55e987ceb\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629168 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4mpw\" (UniqueName: \"kubernetes.io/projected/17dfb7c9-6832-48d3-ad83-91508cf85de3-kube-api-access-b4mpw\") pod \"watcher-operator-controller-manager-6db9b5db6c-29hz8\" (UID: \"17dfb7c9-6832-48d3-ad83-91508cf85de3\") " pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629185 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8kf9\" (UniqueName: \"kubernetes.io/projected/8a04fc71-9575-4cf5-bdab-2c741002c47f-kube-api-access-v8kf9\") pod \"test-operator-controller-manager-7cd8bc9dbb-csd5m\" (UID: \"8a04fc71-9575-4cf5-bdab-2c741002c47f\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629204 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbzt6\" (UniqueName: \"kubernetes.io/projected/d0afd012-c6e1-4a66-a8a1-9edccfdff278-kube-api-access-wbzt6\") pod \"swift-operator-controller-manager-85dd56d4cc-dxnvv\" (UID: \"d0afd012-c6e1-4a66-a8a1-9edccfdff278\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629220 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.629237 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdwlv\" (UniqueName: \"kubernetes.io/projected/50e51652-8f18-4234-b29b-85e684e63bfd-kube-api-access-pdwlv\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.629339 4995 secret.go:188] Couldn't get secret 
openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.629378 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert podName:439ab902-28ff-48a4-81e4-93c72937e573 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:00.629363155 +0000 UTC m=+938.873967961 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert") pod "infra-operator-controller-manager-77c48c7859-zd75z" (UID: "439ab902-28ff-48a4-81e4-93c72937e573") : secret "infra-operator-webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.630128 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.669584 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.678259 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf2g6\" (UniqueName: \"kubernetes.io/projected/de6fc9c2-f9a9-41fd-8cfb-b0493d823c20-kube-api-access-gf2g6\") pod \"octavia-operator-controller-manager-7fc9b76cf6-fk7x2\" (UID: \"de6fc9c2-f9a9-41fd-8cfb-b0493d823c20\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.730399 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4mpw\" (UniqueName: \"kubernetes.io/projected/17dfb7c9-6832-48d3-ad83-91508cf85de3-kube-api-access-b4mpw\") pod \"watcher-operator-controller-manager-6db9b5db6c-29hz8\" (UID: \"17dfb7c9-6832-48d3-ad83-91508cf85de3\") " pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.730605 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8kf9\" (UniqueName: \"kubernetes.io/projected/8a04fc71-9575-4cf5-bdab-2c741002c47f-kube-api-access-v8kf9\") pod \"test-operator-controller-manager-7cd8bc9dbb-csd5m\" (UID: \"8a04fc71-9575-4cf5-bdab-2c741002c47f\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.730688 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbzt6\" (UniqueName: \"kubernetes.io/projected/d0afd012-c6e1-4a66-a8a1-9edccfdff278-kube-api-access-wbzt6\") pod \"swift-operator-controller-manager-85dd56d4cc-dxnvv\" (UID: \"d0afd012-c6e1-4a66-a8a1-9edccfdff278\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.730757 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.730846 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-pdwlv\" (UniqueName: \"kubernetes.io/projected/50e51652-8f18-4234-b29b-85e684e63bfd-kube-api-access-pdwlv\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.730939 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbrhw\" (UniqueName: \"kubernetes.io/projected/31bd181f-39ff-4e9f-949c-8a6ed84f3f42-kube-api-access-cbrhw\") pod \"rabbitmq-cluster-operator-manager-668c99d594-jwzhh\" (UID: \"31bd181f-39ff-4e9f-949c-8a6ed84f3f42\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.731018 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.734635 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.734791 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsp2j\" (UniqueName: \"kubernetes.io/projected/9d2f128c-9463-4735-9bf7-91bff7148887-kube-api-access-zsp2j\") pod \"ovn-operator-controller-manager-55db956ddc-vvwk8\" (UID: \"9d2f128c-9463-4735-9bf7-91bff7148887\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.735014 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctnr2\" (UniqueName: \"kubernetes.io/projected/37f347f2-1ab4-4e49-9340-57a960ff8eb1-kube-api-access-ctnr2\") pod \"placement-operator-controller-manager-686df47fcb-6rn8v\" (UID: \"37f347f2-1ab4-4e49-9340-57a960ff8eb1\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.730959 4995 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.736214 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:00.236190071 +0000 UTC m=+938.480794877 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "metrics-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.731133 4995 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.736392 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert podName:50e51652-8f18-4234-b29b-85e684e63bfd nodeName:}" failed. No retries permitted until 2026-01-20 16:47:00.236378556 +0000 UTC m=+938.480983472 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" (UID: "50e51652-8f18-4234-b29b-85e684e63bfd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.736138 4995 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: E0120 16:46:59.736430 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:00.236423817 +0000 UTC m=+938.481028613 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "webhook-server-cert" not found Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.739159 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.745152 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hlpd\" (UniqueName: \"kubernetes.io/projected/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-kube-api-access-6hlpd\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.745367 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62x8t\" (UniqueName: \"kubernetes.io/projected/c2d307fa-2be9-4f04-8ae4-f3b55e987ceb-kube-api-access-62x8t\") pod \"telemetry-operator-controller-manager-5f8f495fcf-fdpgr\" (UID: \"c2d307fa-2be9-4f04-8ae4-f3b55e987ceb\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.766932 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbzt6\" (UniqueName: \"kubernetes.io/projected/d0afd012-c6e1-4a66-a8a1-9edccfdff278-kube-api-access-wbzt6\") pod \"swift-operator-controller-manager-85dd56d4cc-dxnvv\" (UID: \"d0afd012-c6e1-4a66-a8a1-9edccfdff278\") " pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.768255 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hlpd\" (UniqueName: \"kubernetes.io/projected/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-kube-api-access-6hlpd\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.769648 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdwlv\" (UniqueName: \"kubernetes.io/projected/50e51652-8f18-4234-b29b-85e684e63bfd-kube-api-access-pdwlv\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.773468 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbrhw\" (UniqueName: \"kubernetes.io/projected/31bd181f-39ff-4e9f-949c-8a6ed84f3f42-kube-api-access-cbrhw\") pod \"rabbitmq-cluster-operator-manager-668c99d594-jwzhh\" (UID: \"31bd181f-39ff-4e9f-949c-8a6ed84f3f42\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.780526 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4mpw\" (UniqueName: \"kubernetes.io/projected/17dfb7c9-6832-48d3-ad83-91508cf85de3-kube-api-access-b4mpw\") pod \"watcher-operator-controller-manager-6db9b5db6c-29hz8\" (UID: \"17dfb7c9-6832-48d3-ad83-91508cf85de3\") " pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.791086 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctnr2\" (UniqueName: 
\"kubernetes.io/projected/37f347f2-1ab4-4e49-9340-57a960ff8eb1-kube-api-access-ctnr2\") pod \"placement-operator-controller-manager-686df47fcb-6rn8v\" (UID: \"37f347f2-1ab4-4e49-9340-57a960ff8eb1\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.795255 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.822143 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8kf9\" (UniqueName: \"kubernetes.io/projected/8a04fc71-9575-4cf5-bdab-2c741002c47f-kube-api-access-v8kf9\") pod \"test-operator-controller-manager-7cd8bc9dbb-csd5m\" (UID: \"8a04fc71-9575-4cf5-bdab-2c741002c47f\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.822610 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsp2j\" (UniqueName: \"kubernetes.io/projected/9d2f128c-9463-4735-9bf7-91bff7148887-kube-api-access-zsp2j\") pod \"ovn-operator-controller-manager-55db956ddc-vvwk8\" (UID: \"9d2f128c-9463-4735-9bf7-91bff7148887\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.829319 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62x8t\" (UniqueName: \"kubernetes.io/projected/c2d307fa-2be9-4f04-8ae4-f3b55e987ceb-kube-api-access-62x8t\") pod \"telemetry-operator-controller-manager-5f8f495fcf-fdpgr\" (UID: \"c2d307fa-2be9-4f04-8ae4-f3b55e987ceb\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.830549 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.855179 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-mj76w"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.883572 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz"] Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.887847 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.904278 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.915535 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.931145 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" Jan 20 16:46:59 crc kubenswrapper[4995]: I0120 16:46:59.995596 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.010513 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.061922 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.258975 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.259021 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.259139 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.259280 4995 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.259345 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:01.259331488 +0000 UTC m=+939.503936294 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "metrics-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.259701 4995 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.259743 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert podName:50e51652-8f18-4234-b29b-85e684e63bfd nodeName:}" failed. No retries permitted until 2026-01-20 16:47:01.259735579 +0000 UTC m=+939.504340385 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" (UID: "50e51652-8f18-4234-b29b-85e684e63bfd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.259777 4995 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.259811 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:01.259805891 +0000 UTC m=+939.504410697 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "webhook-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.270583 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb"] Jan 20 16:47:00 crc kubenswrapper[4995]: W0120 16:47:00.302239 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49392c07_237b_447e_a126_f06e1cbf32a2.slice/crio-cf167d1e3dc1437e3b3bd82f1a7fa094c83e53e6560c53f0918a12d3543335b7 WatchSource:0}: Error finding container cf167d1e3dc1437e3b3bd82f1a7fa094c83e53e6560c53f0918a12d3543335b7: Status 404 returned error can't find the container with id cf167d1e3dc1437e3b3bd82f1a7fa094c83e53e6560c53f0918a12d3543335b7 Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.426297 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch"] Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.572412 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.572814 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.677008 4995 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: E0120 16:47:00.677090 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert podName:439ab902-28ff-48a4-81e4-93c72937e573 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:02.677058495 +0000 UTC m=+940.921663301 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert") pod "infra-operator-controller-manager-77c48c7859-zd75z" (UID: "439ab902-28ff-48a4-81e4-93c72937e573") : secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.677087 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.684530 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" event={"ID":"49392c07-237b-447e-a126-f06e1cbf32a2","Type":"ContainerStarted","Data":"cf167d1e3dc1437e3b3bd82f1a7fa094c83e53e6560c53f0918a12d3543335b7"} Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.685437 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" event={"ID":"3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a","Type":"ContainerStarted","Data":"87df3fd3be96f719ac46f3a5a52a0e4db04509ef7f0fc41abee7321172671cf2"} Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.686259 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" event={"ID":"9f302bf3-1501-44cc-924c-2e5c42c0eb58","Type":"ContainerStarted","Data":"b95684491df5ffb3dee84540f6a0c4ed789c4a007afafaf1b095d214c818bc7e"} Jan 20 16:47:00 crc kubenswrapper[4995]: I0120 16:47:00.687143 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" event={"ID":"c0a3e997-8709-444b-ae4e-8fc34b04cb6e","Type":"ContainerStarted","Data":"4c60c5b699bab8f593b5ce0e9d04c2d914242549467ed80d1750ef9d42d70c91"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.023563 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.071128 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m"] Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.090794 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86d4f806_c5e4_4ce0_a859_5e104b0d5dce.slice/crio-c242ef056a74fe634da6b6eb295f1ab475ded04f9fc4fec0e4e1bb08669b6fb3 WatchSource:0}: Error finding container c242ef056a74fe634da6b6eb295f1ab475ded04f9fc4fec0e4e1bb08669b6fb3: Status 404 returned error can't find the container with id c242ef056a74fe634da6b6eb295f1ab475ded04f9fc4fec0e4e1bb08669b6fb3 Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.114704 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.127495 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.138004 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk"] Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.140797 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37f347f2_1ab4_4e49_9340_57a960ff8eb1.slice/crio-0f28d27441ddcd3d8f992fd9b66862345fafec41dd986026130061cb5ccbb4cd WatchSource:0}: Error finding container 0f28d27441ddcd3d8f992fd9b66862345fafec41dd986026130061cb5ccbb4cd: Status 404 returned error can't find the container with id 0f28d27441ddcd3d8f992fd9b66862345fafec41dd986026130061cb5ccbb4cd Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.148007 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v"] Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.151305 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8061771_759d_49d5_b88b_9d66f45277ac.slice/crio-9ff261ec23282e152b3753bcd5243fb538bb7b8d68d8cc79bfc827f43db468f3 WatchSource:0}: Error finding container 9ff261ec23282e152b3753bcd5243fb538bb7b8d68d8cc79bfc827f43db468f3: Status 404 returned error can't find the container with id 9ff261ec23282e152b3753bcd5243fb538bb7b8d68d8cc79bfc827f43db468f3 Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.156581 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31bd181f_39ff_4e9f_949c_8a6ed84f3f42.slice/crio-486f60bd4c88648f462bc6b197d3e9e02ebde536f30fb44499100e71ce9978ee WatchSource:0}: Error finding container 486f60bd4c88648f462bc6b197d3e9e02ebde536f30fb44499100e71ce9978ee: Status 404 returned error can't find the container with id 486f60bd4c88648f462bc6b197d3e9e02ebde536f30fb44499100e71ce9978ee Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.163424 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3c2211e_845d_47cc_b4a5_962340b0d53c.slice/crio-d6eaaeb7919ac796d63dcd02c483d3f70a0174e5ab6e35b5c56b0c9e5b6533e5 WatchSource:0}: Error finding container d6eaaeb7919ac796d63dcd02c483d3f70a0174e5ab6e35b5c56b0c9e5b6533e5: Status 404 returned error can't find the container with id d6eaaeb7919ac796d63dcd02c483d3f70a0174e5ab6e35b5c56b0c9e5b6533e5 Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.167615 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf"] Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.168945 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xnngw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-767fdc4f47-dwn52_openstack-operators(a3c2211e-845d-47cc-b4a5-962340b0d53c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.169144 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wbzt6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-85dd56d4cc-dxnvv_openstack-operators(d0afd012-c6e1-4a66-a8a1-9edccfdff278): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.170059 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" podUID="a3c2211e-845d-47cc-b4a5-962340b0d53c" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.170548 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" podUID="d0afd012-c6e1-4a66-a8a1-9edccfdff278" Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.171548 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17dfb7c9_6832_48d3_ad83_91508cf85de3.slice/crio-b429d5d358c5f1750d7021a47dc0bca6ce8732a3986150aef1d6ec3c6f8f7b6d WatchSource:0}: Error finding container b429d5d358c5f1750d7021a47dc0bca6ce8732a3986150aef1d6ec3c6f8f7b6d: Status 404 returned error can't find the container with id b429d5d358c5f1750d7021a47dc0bca6ce8732a3986150aef1d6ec3c6f8f7b6d Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.178879 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d69a68cdac59165797daf1064f3a3b4b14b546bf1c7254070a7ed1238998c028,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lgtmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-c6994669c-bxm9j_openstack-operators(072647c8-2d0e-4716-bb29-a87e3ff5cd29): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.179007 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.51:5001/openstack-k8s-operators/watcher-operator:111af6c57cd1be8032328ced6eb7a058a201c4b1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b4mpw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6db9b5db6c-29hz8_openstack-operators(17dfb7c9-6832-48d3-ad83-91508cf85de3): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.180117 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" podUID="17dfb7c9-6832-48d3-ad83-91508cf85de3" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.180168 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" podUID="072647c8-2d0e-4716-bb29-a87e3ff5cd29" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.183486 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-95tbl"] Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.185473 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc2d307fa_2be9_4f04_8ae4_f3b55e987ceb.slice/crio-ff3cb601be23df963344d39ba2ed7fc3c7a25504dd9c57ad5bc6c081f03009da WatchSource:0}: Error finding container ff3cb601be23df963344d39ba2ed7fc3c7a25504dd9c57ad5bc6c081f03009da: Status 404 returned error can't find the container with id ff3cb601be23df963344d39ba2ed7fc3c7a25504dd9c57ad5bc6c081f03009da Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.188462 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-62x8t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-5f8f495fcf-fdpgr_openstack-operators(c2d307fa-2be9-4f04-8ae4-f3b55e987ceb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.189640 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" podUID="c2d307fa-2be9-4f04-8ae4-f3b55e987ceb" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.206501 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.219051 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.224440 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.229185 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.233705 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.238717 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.243274 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j"] Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.285454 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.285541 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.285570 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.285670 4995 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.285701 4995 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.285755 4995 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.285770 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:03.285746771 +0000 UTC m=+941.530351587 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "webhook-server-cert" not found Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.285917 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:03.285864034 +0000 UTC m=+941.530468900 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "metrics-server-cert" not found Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.285947 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert podName:50e51652-8f18-4234-b29b-85e684e63bfd nodeName:}" failed. No retries permitted until 2026-01-20 16:47:03.285936156 +0000 UTC m=+941.530541082 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" (UID: "50e51652-8f18-4234-b29b-85e684e63bfd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.484523 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2"] Jan 20 16:47:01 crc kubenswrapper[4995]: W0120 16:47:01.492291 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde6fc9c2_f9a9_41fd_8cfb_b0493d823c20.slice/crio-e4e228ad449e749ee7c7560c8891f79df564e29eae4768eb3a41ccd275efaa13 WatchSource:0}: Error finding container e4e228ad449e749ee7c7560c8891f79df564e29eae4768eb3a41ccd275efaa13: Status 404 returned error can't find the container with id e4e228ad449e749ee7c7560c8891f79df564e29eae4768eb3a41ccd275efaa13 Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.696062 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" event={"ID":"17dfb7c9-6832-48d3-ad83-91508cf85de3","Type":"ContainerStarted","Data":"b429d5d358c5f1750d7021a47dc0bca6ce8732a3986150aef1d6ec3c6f8f7b6d"} Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.697291 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.51:5001/openstack-k8s-operators/watcher-operator:111af6c57cd1be8032328ced6eb7a058a201c4b1\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" podUID="17dfb7c9-6832-48d3-ad83-91508cf85de3" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.699879 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" event={"ID":"47ec26a3-41ca-482f-b539-c9dc32af0bb0","Type":"ContainerStarted","Data":"1b9b3ab8b1749ae59c8d2aaa5b69ccfb09330acd815d8fe350c403003fe3c357"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.701794 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" event={"ID":"93ac6eeb-0456-4cfe-8298-b8b97d09716c","Type":"ContainerStarted","Data":"0573b2cb5eeab58cc31bfd7b6a78cb3f1f4c7838e633c94ff2bac50ce2e787b3"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.702666 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" event={"ID":"d0afd012-c6e1-4a66-a8a1-9edccfdff278","Type":"ContainerStarted","Data":"dc0d70b3aad1614d8f8c93c4a654732edc40d38efc4f7495d4b9f4ab5d32e49d"} Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.706828 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92\\\"\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" podUID="d0afd012-c6e1-4a66-a8a1-9edccfdff278" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.707213 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" event={"ID":"8a04fc71-9575-4cf5-bdab-2c741002c47f","Type":"ContainerStarted","Data":"907fe4d1603cd95fdf30fd1982c790fd4443ccf89d53cdbbe4645ed74289243c"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.715833 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" event={"ID":"37f347f2-1ab4-4e49-9340-57a960ff8eb1","Type":"ContainerStarted","Data":"0f28d27441ddcd3d8f992fd9b66862345fafec41dd986026130061cb5ccbb4cd"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.719106 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" event={"ID":"f4577775-2c19-495a-95e7-1638f359b533","Type":"ContainerStarted","Data":"03901ec0e82058ef0acd79ab17e167b5cb807efa2dbe8c6420c47dc354597172"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.720254 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" event={"ID":"31bd181f-39ff-4e9f-949c-8a6ed84f3f42","Type":"ContainerStarted","Data":"486f60bd4c88648f462bc6b197d3e9e02ebde536f30fb44499100e71ce9978ee"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.721858 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" event={"ID":"de6fc9c2-f9a9-41fd-8cfb-b0493d823c20","Type":"ContainerStarted","Data":"e4e228ad449e749ee7c7560c8891f79df564e29eae4768eb3a41ccd275efaa13"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.722661 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" event={"ID":"a3c2211e-845d-47cc-b4a5-962340b0d53c","Type":"ContainerStarted","Data":"d6eaaeb7919ac796d63dcd02c483d3f70a0174e5ab6e35b5c56b0c9e5b6533e5"} Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.741371 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" podUID="a3c2211e-845d-47cc-b4a5-962340b0d53c" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.778303 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" event={"ID":"86d4f806-c5e4-4ce0-a859-5e104b0d5dce","Type":"ContainerStarted","Data":"c242ef056a74fe634da6b6eb295f1ab475ded04f9fc4fec0e4e1bb08669b6fb3"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.812344 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" event={"ID":"ffe39c73-665e-4de6-afb5-2e9b93419e33","Type":"ContainerStarted","Data":"632f7f0e83d07cfbbbb0840b4c54e53c1c15a20393e9c82192b82e35eaef460b"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.843400 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" event={"ID":"c8061771-759d-49d5-b88b-9d66f45277ac","Type":"ContainerStarted","Data":"9ff261ec23282e152b3753bcd5243fb538bb7b8d68d8cc79bfc827f43db468f3"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.846859 
4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" event={"ID":"c2d307fa-2be9-4f04-8ae4-f3b55e987ceb","Type":"ContainerStarted","Data":"ff3cb601be23df963344d39ba2ed7fc3c7a25504dd9c57ad5bc6c081f03009da"} Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.852816 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" podUID="c2d307fa-2be9-4f04-8ae4-f3b55e987ceb" Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.864827 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" event={"ID":"9d2f128c-9463-4735-9bf7-91bff7148887","Type":"ContainerStarted","Data":"e7a2e909a91993b74ce93a62325f13e7635d330dfecd2a4e39d7d825dc238f12"} Jan 20 16:47:01 crc kubenswrapper[4995]: I0120 16:47:01.879234 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" event={"ID":"072647c8-2d0e-4716-bb29-a87e3ff5cd29","Type":"ContainerStarted","Data":"49318eac2d906168c73f960af93cd4bf1e335a6f8e251ab695897c811aa647b2"} Jan 20 16:47:01 crc kubenswrapper[4995]: E0120 16:47:01.893342 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:d69a68cdac59165797daf1064f3a3b4b14b546bf1c7254070a7ed1238998c028\\\"\"" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" podUID="072647c8-2d0e-4716-bb29-a87e3ff5cd29" Jan 20 16:47:02 crc kubenswrapper[4995]: I0120 16:47:02.717064 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.717295 4995 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.717593 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert podName:439ab902-28ff-48a4-81e4-93c72937e573 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:06.717574798 +0000 UTC m=+944.962179604 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert") pod "infra-operator-controller-manager-77c48c7859-zd75z" (UID: "439ab902-28ff-48a4-81e4-93c72937e573") : secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.916884 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.51:5001/openstack-k8s-operators/watcher-operator:111af6c57cd1be8032328ced6eb7a058a201c4b1\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" podUID="17dfb7c9-6832-48d3-ad83-91508cf85de3" Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.917336 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:9404536bf7cb7c3818e1a0f92b53e4d7c02fe7942324f32894106f02f8fc7e92\\\"\"" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" podUID="d0afd012-c6e1-4a66-a8a1-9edccfdff278" Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.917375 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:d69a68cdac59165797daf1064f3a3b4b14b546bf1c7254070a7ed1238998c028\\\"\"" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" podUID="072647c8-2d0e-4716-bb29-a87e3ff5cd29" Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.917410 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" podUID="c2d307fa-2be9-4f04-8ae4-f3b55e987ceb" Jan 20 16:47:02 crc kubenswrapper[4995]: E0120 16:47:02.917898 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" podUID="a3c2211e-845d-47cc-b4a5-962340b0d53c" Jan 20 16:47:03 crc kubenswrapper[4995]: I0120 16:47:03.338131 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:03 crc kubenswrapper[4995]: E0120 16:47:03.338264 4995 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 16:47:03 crc kubenswrapper[4995]: I0120 16:47:03.338366 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod 
\"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:47:03 crc kubenswrapper[4995]: I0120 16:47:03.338390 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:03 crc kubenswrapper[4995]: E0120 16:47:03.338443 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:07.338389203 +0000 UTC m=+945.582994009 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "metrics-server-cert" not found Jan 20 16:47:03 crc kubenswrapper[4995]: E0120 16:47:03.338521 4995 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:03 crc kubenswrapper[4995]: E0120 16:47:03.338582 4995 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 16:47:03 crc kubenswrapper[4995]: E0120 16:47:03.338595 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert podName:50e51652-8f18-4234-b29b-85e684e63bfd nodeName:}" failed. No retries permitted until 2026-01-20 16:47:07.338576968 +0000 UTC m=+945.583181774 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" (UID: "50e51652-8f18-4234-b29b-85e684e63bfd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:03 crc kubenswrapper[4995]: E0120 16:47:03.338653 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:07.338619909 +0000 UTC m=+945.583224715 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "webhook-server-cert" not found Jan 20 16:47:04 crc kubenswrapper[4995]: E0120 16:47:04.782207 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda01a294_6f73_4389_8117_a857e195a1c8.slice/crio-bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:47:06 crc kubenswrapper[4995]: I0120 16:47:06.786625 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:06 crc kubenswrapper[4995]: E0120 16:47:06.786858 4995 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:06 crc kubenswrapper[4995]: E0120 16:47:06.787322 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert podName:439ab902-28ff-48a4-81e4-93c72937e573 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:14.787299499 +0000 UTC m=+953.031904355 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert") pod "infra-operator-controller-manager-77c48c7859-zd75z" (UID: "439ab902-28ff-48a4-81e4-93c72937e573") : secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:07 crc kubenswrapper[4995]: I0120 16:47:07.397037 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:07 crc kubenswrapper[4995]: I0120 16:47:07.397128 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:47:07 crc kubenswrapper[4995]: I0120 16:47:07.397146 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:07 crc kubenswrapper[4995]: E0120 16:47:07.397220 4995 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 16:47:07 crc 
kubenswrapper[4995]: E0120 16:47:07.397267 4995 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 16:47:07 crc kubenswrapper[4995]: E0120 16:47:07.397303 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:15.39728198 +0000 UTC m=+953.641886846 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "metrics-server-cert" not found Jan 20 16:47:07 crc kubenswrapper[4995]: E0120 16:47:07.397323 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:15.397313811 +0000 UTC m=+953.641918697 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "webhook-server-cert" not found Jan 20 16:47:07 crc kubenswrapper[4995]: E0120 16:47:07.397331 4995 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:07 crc kubenswrapper[4995]: E0120 16:47:07.397360 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert podName:50e51652-8f18-4234-b29b-85e684e63bfd nodeName:}" failed. No retries permitted until 2026-01-20 16:47:15.397345952 +0000 UTC m=+953.641950758 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" (UID: "50e51652-8f18-4234-b29b-85e684e63bfd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.341174 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:ddb59f1a8e3fd0d641405e371e33b3d8c913af08e40e84f390e7e06f0a7f3488" Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.342364 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:ddb59f1a8e3fd0d641405e371e33b3d8c913af08e40e84f390e7e06f0a7f3488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qbhj2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-9b68f5989-zgvcz_openstack-operators(3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.343558 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" podUID="3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a" Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.909190 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492" Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.909362 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n6m7k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-594c8c9d5d-wm2kb_openstack-operators(49392c07-237b-447e-a126-f06e1cbf32a2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.910582 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" podUID="49392c07-237b-447e-a126-f06e1cbf32a2" Jan 20 16:47:13 crc kubenswrapper[4995]: I0120 16:47:13.991516 4995 provider.go:102] Refreshing cache for provider: 
*credentialprovider.defaultDockerConfigProvider Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.991619 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492\\\"\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" podUID="49392c07-237b-447e-a126-f06e1cbf32a2" Jan 20 16:47:13 crc kubenswrapper[4995]: E0120 16:47:13.991996 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:ddb59f1a8e3fd0d641405e371e33b3d8c913af08e40e84f390e7e06f0a7f3488\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" podUID="3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.422037 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:f0634d8cf7c2c2919ca248a6883ce43d6ae4ac59252c987a5cfe17643fe7d38a" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.422298 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:f0634d8cf7c2c2919ca248a6883ce43d6ae4ac59252c987a5cfe17643fe7d38a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4659v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7ddb5c749-7zdch_openstack-operators(c0a3e997-8709-444b-ae4e-8fc34b04cb6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.425248 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" podUID="c0a3e997-8709-444b-ae4e-8fc34b04cb6e" Jan 20 16:47:14 crc kubenswrapper[4995]: I0120 16:47:14.805393 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.805504 4995 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.805938 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert podName:439ab902-28ff-48a4-81e4-93c72937e573 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:30.805874272 +0000 UTC m=+969.050479168 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert") pod "infra-operator-controller-manager-77c48c7859-zd75z" (UID: "439ab902-28ff-48a4-81e4-93c72937e573") : secret "infra-operator-webhook-server-cert" not found Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.933441 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.933622 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zsp2j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-55db956ddc-vvwk8_openstack-operators(9d2f128c-9463-4735-9bf7-91bff7148887): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.934749 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" 
podUID="9d2f128c-9463-4735-9bf7-91bff7148887" Jan 20 16:47:14 crc kubenswrapper[4995]: E0120 16:47:14.955560 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda01a294_6f73_4389_8117_a857e195a1c8.slice/crio-bbdaa6af9ba2b78f273962105d19a2ecd01bed7b35dcafcedd010076a2e05496.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.003510 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:f0634d8cf7c2c2919ca248a6883ce43d6ae4ac59252c987a5cfe17643fe7d38a\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" podUID="c0a3e997-8709-444b-ae4e-8fc34b04cb6e" Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.003542 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" podUID="9d2f128c-9463-4735-9bf7-91bff7148887" Jan 20 16:47:15 crc kubenswrapper[4995]: I0120 16:47:15.418026 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:15 crc kubenswrapper[4995]: I0120 16:47:15.418095 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" Jan 20 16:47:15 crc kubenswrapper[4995]: I0120 16:47:15.418114 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.418192 4995 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.418225 4995 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.418226 4995 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.418253 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs 
podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:31.418234927 +0000 UTC m=+969.662839733 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "metrics-server-cert" not found Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.418272 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs podName:bb15a8a1-9d6b-4032-9ecb-71719f2b3d91 nodeName:}" failed. No retries permitted until 2026-01-20 16:47:31.418261597 +0000 UTC m=+969.662866403 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs") pod "openstack-operator-controller-manager-7696897b84-8gt6d" (UID: "bb15a8a1-9d6b-4032-9ecb-71719f2b3d91") : secret "webhook-server-cert" not found Jan 20 16:47:15 crc kubenswrapper[4995]: E0120 16:47:15.418282 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert podName:50e51652-8f18-4234-b29b-85e684e63bfd nodeName:}" failed. No retries permitted until 2026-01-20 16:47:31.418277788 +0000 UTC m=+969.662882594 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" (UID: "50e51652-8f18-4234-b29b-85e684e63bfd") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 16:47:24 crc kubenswrapper[4995]: E0120 16:47:24.086848 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729" Jan 20 16:47:24 crc kubenswrapper[4995]: E0120 16:47:24.087514 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gf2g6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7fc9b76cf6-fk7x2_openstack-operators(de6fc9c2-f9a9-41fd-8cfb-b0493d823c20): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:24 crc kubenswrapper[4995]: E0120 16:47:24.088736 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" podUID="de6fc9c2-f9a9-41fd-8cfb-b0493d823c20" Jan 20 16:47:24 crc kubenswrapper[4995]: E0120 16:47:24.613180 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:146961cac3291daf96c1ca2bc7bd52bc94d1f4787a0770e23205c2c9beb0d737" Jan 20 16:47:24 crc kubenswrapper[4995]: E0120 16:47:24.613374 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:146961cac3291daf96c1ca2bc7bd52bc94d1f4787a0770e23205c2c9beb0d737,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ctnr2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-686df47fcb-6rn8v_openstack-operators(37f347f2-1ab4-4e49-9340-57a960ff8eb1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:24 crc kubenswrapper[4995]: E0120 16:47:24.614560 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" podUID="37f347f2-1ab4-4e49-9340-57a960ff8eb1" Jan 20 16:47:25 crc kubenswrapper[4995]: E0120 16:47:25.033327 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 20 16:47:25 crc kubenswrapper[4995]: E0120 16:47:25.033473 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cbrhw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-jwzhh_openstack-operators(31bd181f-39ff-4e9f-949c-8a6ed84f3f42): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:47:25 crc kubenswrapper[4995]: E0120 16:47:25.034559 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" podUID="31bd181f-39ff-4e9f-949c-8a6ed84f3f42" Jan 20 16:47:25 crc kubenswrapper[4995]: E0120 16:47:25.081424 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:146961cac3291daf96c1ca2bc7bd52bc94d1f4787a0770e23205c2c9beb0d737\\\"\"" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" podUID="37f347f2-1ab4-4e49-9340-57a960ff8eb1" Jan 20 16:47:25 crc kubenswrapper[4995]: E0120 16:47:25.081431 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" podUID="de6fc9c2-f9a9-41fd-8cfb-b0493d823c20" Jan 20 16:47:25 crc kubenswrapper[4995]: E0120 16:47:25.081624 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" podUID="31bd181f-39ff-4e9f-949c-8a6ed84f3f42" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.086206 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" event={"ID":"86d4f806-c5e4-4ce0-a859-5e104b0d5dce","Type":"ContainerStarted","Data":"294d45a7d2fcc27f74c0404a3316820f766543b3650504b2e46422b79825a4c4"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.086668 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.089224 4995 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" event={"ID":"9f302bf3-1501-44cc-924c-2e5c42c0eb58","Type":"ContainerStarted","Data":"b8c503f352b02831bad7ca03caaa4942cafedbd57f2633cf7fb26919fdf32ad8"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.089306 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.091802 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" event={"ID":"47ec26a3-41ca-482f-b539-c9dc32af0bb0","Type":"ContainerStarted","Data":"9fd7858c6581f52e34713a0c7bbcab4c63087c2e8bc6f732688611a14daff70c"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.092105 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.093335 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" event={"ID":"93ac6eeb-0456-4cfe-8298-b8b97d09716c","Type":"ContainerStarted","Data":"c38fd9a0f2736cfce482c7d30c82eceae02029b3494cc62d5128f0debdc87aa4"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.093465 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.096165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" event={"ID":"f4577775-2c19-495a-95e7-1638f359b533","Type":"ContainerStarted","Data":"c81f5a24cae59f3f9b32f6d5fd0c66fc4a5e2a3e924c2695671998f9c86bb289"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.096317 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.098336 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" event={"ID":"8a04fc71-9575-4cf5-bdab-2c741002c47f","Type":"ContainerStarted","Data":"16bf387f3fd8a9fcffe3be99f38ebd1a2c4e94048ef8e44d9db65afbed3b84f2"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.098463 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.100940 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" event={"ID":"ffe39c73-665e-4de6-afb5-2e9b93419e33","Type":"ContainerStarted","Data":"78d83311b0be5239abf7a7cef7dc52577fc93225cdae9e491d7840ed21fbb3ea"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.101268 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.104244 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b" podStartSLOduration=3.188315658 
podStartE2EDuration="27.104230444s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.114541759 +0000 UTC m=+939.359146565" lastFinishedPulling="2026-01-20 16:47:25.030456545 +0000 UTC m=+963.275061351" observedRunningTime="2026-01-20 16:47:26.102322802 +0000 UTC m=+964.346927608" watchObservedRunningTime="2026-01-20 16:47:26.104230444 +0000 UTC m=+964.348835250" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.107762 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" event={"ID":"c8061771-759d-49d5-b88b-9d66f45277ac","Type":"ContainerStarted","Data":"900c7e44f8dd4e76377acf976f2eff57a48b56e0e717d45d788183d8afd8eabb"} Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.108247 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.124832 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk" podStartSLOduration=4.169533748 podStartE2EDuration="28.124818172s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.075303555 +0000 UTC m=+939.319908351" lastFinishedPulling="2026-01-20 16:47:25.030587969 +0000 UTC m=+963.275192775" observedRunningTime="2026-01-20 16:47:26.118707696 +0000 UTC m=+964.363312502" watchObservedRunningTime="2026-01-20 16:47:26.124818172 +0000 UTC m=+964.369422978" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.150768 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf" podStartSLOduration=4.284166637 podStartE2EDuration="28.150749555s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.163652911 +0000 UTC m=+939.408257717" lastFinishedPulling="2026-01-20 16:47:25.030235819 +0000 UTC m=+963.274840635" observedRunningTime="2026-01-20 16:47:26.148834404 +0000 UTC m=+964.393439220" watchObservedRunningTime="2026-01-20 16:47:26.150749555 +0000 UTC m=+964.395354361" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.168665 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m" podStartSLOduration=3.233608185 podStartE2EDuration="27.168649361s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.093745655 +0000 UTC m=+939.338350461" lastFinishedPulling="2026-01-20 16:47:25.028786831 +0000 UTC m=+963.273391637" observedRunningTime="2026-01-20 16:47:26.164887758 +0000 UTC m=+964.409492564" watchObservedRunningTime="2026-01-20 16:47:26.168649361 +0000 UTC m=+964.413254167" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.184727 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4" podStartSLOduration=4.318049255 podStartE2EDuration="28.184710126s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.163293881 +0000 UTC m=+939.407898687" lastFinishedPulling="2026-01-20 16:47:25.029954752 +0000 UTC m=+963.274559558" observedRunningTime="2026-01-20 16:47:26.183589036 +0000 UTC m=+964.428193852" watchObservedRunningTime="2026-01-20 
16:47:26.184710126 +0000 UTC m=+964.429314932" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.205134 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl" podStartSLOduration=3.32813921 podStartE2EDuration="27.20511948s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.152583851 +0000 UTC m=+939.397188657" lastFinishedPulling="2026-01-20 16:47:25.029564121 +0000 UTC m=+963.274168927" observedRunningTime="2026-01-20 16:47:26.202812537 +0000 UTC m=+964.447417353" watchObservedRunningTime="2026-01-20 16:47:26.20511948 +0000 UTC m=+964.449724286" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.238843 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w" podStartSLOduration=3.147164844 podStartE2EDuration="28.238821634s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:46:59.937902781 +0000 UTC m=+938.182507587" lastFinishedPulling="2026-01-20 16:47:25.029559571 +0000 UTC m=+963.274164377" observedRunningTime="2026-01-20 16:47:26.220433515 +0000 UTC m=+964.465038321" watchObservedRunningTime="2026-01-20 16:47:26.238821634 +0000 UTC m=+964.483426450" Jan 20 16:47:26 crc kubenswrapper[4995]: I0120 16:47:26.254941 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf" podStartSLOduration=4.378914755 podStartE2EDuration="28.2549236s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.154588494 +0000 UTC m=+939.399193310" lastFinishedPulling="2026-01-20 16:47:25.030597349 +0000 UTC m=+963.275202155" observedRunningTime="2026-01-20 16:47:26.25050994 +0000 UTC m=+964.495114746" watchObservedRunningTime="2026-01-20 16:47:26.2549236 +0000 UTC m=+964.499528396" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.134576 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" event={"ID":"d0afd012-c6e1-4a66-a8a1-9edccfdff278","Type":"ContainerStarted","Data":"8fc71f50eadb2b58121b8d8c1388c054848381aeb7557dc131eb9fd40d927a4a"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.135286 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.136310 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" event={"ID":"49392c07-237b-447e-a126-f06e1cbf32a2","Type":"ContainerStarted","Data":"f54f831c1d0d3151f0a13a94b9e42fa82226b21e87d1663c45e4d74c20ce3607"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.136728 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.138045 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" event={"ID":"3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a","Type":"ContainerStarted","Data":"67ad5040ce4ea8be43370df70ed517819509bc77565c681a41601cca7945c0f8"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.138278 4995 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.139461 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" event={"ID":"a3c2211e-845d-47cc-b4a5-962340b0d53c","Type":"ContainerStarted","Data":"b9bd8c0d78d680acc160e821ece6652b761bec81572bfc84fdf766fe0cf50d90"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.139647 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.140826 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" event={"ID":"c0a3e997-8709-444b-ae4e-8fc34b04cb6e","Type":"ContainerStarted","Data":"f62e0c2d8b07c2c59824e62d110da37e5d3f18e8a9f6034b7704a51a2bc25a52"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.140978 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.143968 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" event={"ID":"c2d307fa-2be9-4f04-8ae4-f3b55e987ceb","Type":"ContainerStarted","Data":"996d396cef00f56ee05f2278d8b1f5ab7032fc7fdc9cb95c2dcbd15f7f43be03"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.144204 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.145387 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" event={"ID":"9d2f128c-9463-4735-9bf7-91bff7148887","Type":"ContainerStarted","Data":"7cf124bd1fb95491de1bd5bce35a26c452ab675f326811cb8f6111dbde1f05ab"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.145506 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.147272 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" event={"ID":"072647c8-2d0e-4716-bb29-a87e3ff5cd29","Type":"ContainerStarted","Data":"2cf5e2bc8b71e582af80cec0e9cf5bb672c6d56ea9f88a8076705d508183ccc3"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.147480 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.148825 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" event={"ID":"17dfb7c9-6832-48d3-ad83-91508cf85de3","Type":"ContainerStarted","Data":"201febe0afcfdfbc4c9aec255ee7ef0e987f0bca748ea9e040766995dd301104"} Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.149087 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.167563 4995 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv" podStartSLOduration=3.366328724 podStartE2EDuration="31.16754872s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.169028247 +0000 UTC m=+939.413633053" lastFinishedPulling="2026-01-20 16:47:28.970248233 +0000 UTC m=+967.214853049" observedRunningTime="2026-01-20 16:47:30.163536132 +0000 UTC m=+968.408140938" watchObservedRunningTime="2026-01-20 16:47:30.16754872 +0000 UTC m=+968.412153526" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.211400 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch" podStartSLOduration=3.70092964 podStartE2EDuration="32.211387059s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:00.461841739 +0000 UTC m=+938.706446545" lastFinishedPulling="2026-01-20 16:47:28.972299148 +0000 UTC m=+967.216903964" observedRunningTime="2026-01-20 16:47:30.21035037 +0000 UTC m=+968.454955186" watchObservedRunningTime="2026-01-20 16:47:30.211387059 +0000 UTC m=+968.455991865" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.223159 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr" podStartSLOduration=3.43512017 podStartE2EDuration="31.223147698s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.18832225 +0000 UTC m=+939.432927056" lastFinishedPulling="2026-01-20 16:47:28.976349738 +0000 UTC m=+967.220954584" observedRunningTime="2026-01-20 16:47:30.222683596 +0000 UTC m=+968.467288402" watchObservedRunningTime="2026-01-20 16:47:30.223147698 +0000 UTC m=+968.467752504" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.301677 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8" podStartSLOduration=2.700659672 podStartE2EDuration="31.301653747s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.075335495 +0000 UTC m=+939.319940301" lastFinishedPulling="2026-01-20 16:47:29.67632957 +0000 UTC m=+967.920934376" observedRunningTime="2026-01-20 16:47:30.265764214 +0000 UTC m=+968.510369010" watchObservedRunningTime="2026-01-20 16:47:30.301653747 +0000 UTC m=+968.546258553" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.317618 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz" podStartSLOduration=3.3048139389999998 podStartE2EDuration="32.31759927s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:46:59.957479672 +0000 UTC m=+938.202084478" lastFinishedPulling="2026-01-20 16:47:28.970264983 +0000 UTC m=+967.214869809" observedRunningTime="2026-01-20 16:47:30.304616947 +0000 UTC m=+968.549221753" watchObservedRunningTime="2026-01-20 16:47:30.31759927 +0000 UTC m=+968.562204066" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.326479 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52" podStartSLOduration=4.526274502 podStartE2EDuration="32.32646197s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" 
firstStartedPulling="2026-01-20 16:47:01.16878142 +0000 UTC m=+939.413386226" lastFinishedPulling="2026-01-20 16:47:28.968968878 +0000 UTC m=+967.213573694" observedRunningTime="2026-01-20 16:47:30.323535961 +0000 UTC m=+968.568140777" watchObservedRunningTime="2026-01-20 16:47:30.32646197 +0000 UTC m=+968.571066776" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.352892 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8" podStartSLOduration=3.56273283 podStartE2EDuration="31.352874996s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.178955935 +0000 UTC m=+939.423560741" lastFinishedPulling="2026-01-20 16:47:28.969098071 +0000 UTC m=+967.213702907" observedRunningTime="2026-01-20 16:47:30.346920134 +0000 UTC m=+968.591524940" watchObservedRunningTime="2026-01-20 16:47:30.352874996 +0000 UTC m=+968.597479802" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.378275 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb" podStartSLOduration=3.709995586 podStartE2EDuration="32.378260544s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:00.30403017 +0000 UTC m=+938.548634976" lastFinishedPulling="2026-01-20 16:47:28.972295118 +0000 UTC m=+967.216899934" observedRunningTime="2026-01-20 16:47:30.37586678 +0000 UTC m=+968.620471586" watchObservedRunningTime="2026-01-20 16:47:30.378260544 +0000 UTC m=+968.622865350" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.391340 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j" podStartSLOduration=4.599793875 podStartE2EDuration="32.391325419s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.178779941 +0000 UTC m=+939.423384747" lastFinishedPulling="2026-01-20 16:47:28.970311445 +0000 UTC m=+967.214916291" observedRunningTime="2026-01-20 16:47:30.387169116 +0000 UTC m=+968.631773922" watchObservedRunningTime="2026-01-20 16:47:30.391325419 +0000 UTC m=+968.635930225" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.572120 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.572175 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.572232 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.572813 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef514ad170d2e1a38aa428bd4835a847c0ca2074e5ec7e7cc5427ce30e0cd1ed"} 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.572874 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://ef514ad170d2e1a38aa428bd4835a847c0ca2074e5ec7e7cc5427ce30e0cd1ed" gracePeriod=600 Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.854801 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:30 crc kubenswrapper[4995]: I0120 16:47:30.878421 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/439ab902-28ff-48a4-81e4-93c72937e573-cert\") pod \"infra-operator-controller-manager-77c48c7859-zd75z\" (UID: \"439ab902-28ff-48a4-81e4-93c72937e573\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.002294 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-7qgmn" Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.011277 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.174686 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="ef514ad170d2e1a38aa428bd4835a847c0ca2074e5ec7e7cc5427ce30e0cd1ed" exitCode=0 Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.176070 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"ef514ad170d2e1a38aa428bd4835a847c0ca2074e5ec7e7cc5427ce30e0cd1ed"} Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.176860 4995 scope.go:117] "RemoveContainer" containerID="21a51aeb68249229f8bec50af82e0400807574c3c8c35d6878a257fbb5a8baf3" Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.177374 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"8ecef0f787bcc6c0229321b3bf04fd7a400236ca19aefa00a3e8afeb5931315b"} Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.292267 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z"] Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.465401 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" 
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.465654 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.465736 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.470651 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-webhook-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.470759 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bb15a8a1-9d6b-4032-9ecb-71719f2b3d91-metrics-certs\") pod \"openstack-operator-controller-manager-7696897b84-8gt6d\" (UID: \"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91\") " pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.477800 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/50e51652-8f18-4234-b29b-85e684e63bfd-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9\" (UID: \"50e51652-8f18-4234-b29b-85e684e63bfd\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.686696 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fgcfd"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.695572 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.755696 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-jd2df"
Jan 20 16:47:31 crc kubenswrapper[4995]: I0120 16:47:31.763858 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"
Jan 20 16:47:32 crc kubenswrapper[4995]: I0120 16:47:32.092101 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"]
Jan 20 16:47:32 crc kubenswrapper[4995]: I0120 16:47:32.182291 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" event={"ID":"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91","Type":"ContainerStarted","Data":"cd57869a6d2b7449eb2a4d06aff91ecfb33bbadeafb8ebf1eb80317e1d6dc416"}
Jan 20 16:47:32 crc kubenswrapper[4995]: I0120 16:47:32.185552 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" event={"ID":"439ab902-28ff-48a4-81e4-93c72937e573","Type":"ContainerStarted","Data":"1f1ec9b8fd19ea2187e122d4ea0f8c38d895b864aa95664adcfbb55adcb3c1a4"}
Jan 20 16:47:32 crc kubenswrapper[4995]: I0120 16:47:32.253708 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"]
Jan 20 16:47:33 crc kubenswrapper[4995]: I0120 16:47:33.197190 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" event={"ID":"50e51652-8f18-4234-b29b-85e684e63bfd","Type":"ContainerStarted","Data":"6a00bec25978d16bbe907a0387f35fc469d7ad243de6a8d7e5a5dcb2c23f68c1"}
Jan 20 16:47:33 crc kubenswrapper[4995]: I0120 16:47:33.198664 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" event={"ID":"bb15a8a1-9d6b-4032-9ecb-71719f2b3d91","Type":"ContainerStarted","Data":"9730e40a893803c146c2f6dfe98ed4ab886c447701768381fc9dd63eec131fa3"}
Jan 20 16:47:33 crc kubenswrapper[4995]: I0120 16:47:33.198710 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:33 crc kubenswrapper[4995]: I0120 16:47:33.224352 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d" podStartSLOduration=34.224334472 podStartE2EDuration="34.224334472s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:47:33.222927384 +0000 UTC m=+971.467532210" watchObservedRunningTime="2026-01-20 16:47:33.224334472 +0000 UTC m=+971.468939278"
Jan 20 16:47:34 crc kubenswrapper[4995]: I0120 16:47:34.207165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" event={"ID":"439ab902-28ff-48a4-81e4-93c72937e573","Type":"ContainerStarted","Data":"1071044ad28740663daa5fd2c11b4b04801bea37213691663d6c708792b4501d"}
Jan 20 16:47:34 crc kubenswrapper[4995]: I0120 16:47:34.207554 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z"
Jan 20 16:47:34 crc kubenswrapper[4995]: I0120 16:47:34.234561 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z" podStartSLOduration=34.056835634 podStartE2EDuration="36.234539717s" podCreationTimestamp="2026-01-20 16:46:58 +0000 UTC" firstStartedPulling="2026-01-20 16:47:31.315233933 +0000 UTC m=+969.559838739" lastFinishedPulling="2026-01-20 16:47:33.492938016 +0000 UTC m=+971.737542822" observedRunningTime="2026-01-20 16:47:34.221281327 +0000 UTC m=+972.465886163" watchObservedRunningTime="2026-01-20 16:47:34.234539717 +0000 UTC m=+972.479144523"
Jan 20 16:47:36 crc kubenswrapper[4995]: I0120 16:47:36.224243 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" event={"ID":"50e51652-8f18-4234-b29b-85e684e63bfd","Type":"ContainerStarted","Data":"df62d3423ce9c9708613b1778038dd1006d815d11644c35c340e162c61164ac4"}
Jan 20 16:47:36 crc kubenswrapper[4995]: I0120 16:47:36.224900 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"
Jan 20 16:47:36 crc kubenswrapper[4995]: I0120 16:47:36.265612 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9" podStartSLOduration=34.340384849 podStartE2EDuration="37.265573623s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:32.265055279 +0000 UTC m=+970.509660085" lastFinishedPulling="2026-01-20 16:47:35.190244063 +0000 UTC m=+973.434848859" observedRunningTime="2026-01-20 16:47:36.253426804 +0000 UTC m=+974.498031630" watchObservedRunningTime="2026-01-20 16:47:36.265573623 +0000 UTC m=+974.510178429"
Jan 20 16:47:38 crc kubenswrapper[4995]: I0120 16:47:38.240120 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" event={"ID":"37f347f2-1ab4-4e49-9340-57a960ff8eb1","Type":"ContainerStarted","Data":"c9f51eaf92b5ee7d4d978e50fa7d1f24083b470118d6d3c146286e5204da842a"}
Jan 20 16:47:38 crc kubenswrapper[4995]: I0120 16:47:38.240701 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v"
Jan 20 16:47:38 crc kubenswrapper[4995]: I0120 16:47:38.255555 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v" podStartSLOduration=2.856312363 podStartE2EDuration="39.255539415s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.150143054 +0000 UTC m=+939.394747860" lastFinishedPulling="2026-01-20 16:47:37.549370086 +0000 UTC m=+975.793974912" observedRunningTime="2026-01-20 16:47:38.253259043 +0000 UTC m=+976.497863849" watchObservedRunningTime="2026-01-20 16:47:38.255539415 +0000 UTC m=+976.500144221"
Jan 20 16:47:38 crc kubenswrapper[4995]: I0120 16:47:38.986279 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-7zdch"
Jan 20 16:47:38 crc kubenswrapper[4995]: I0120 16:47:38.986749 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-zgvcz"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.011997 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-9f958b845-mj76w"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.121597 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-wm2kb"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.317471 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-7p5v4"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.317975 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-zs4nf"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.377556 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-c6994669c-bxm9j"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.414790 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-wjrpf"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.547433 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-dwn52"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.672611 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-65849867d6-95tbl"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.741708 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-qd2nk"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.824609 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-m7p7b"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.844457 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-csd5m"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.891441 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6db9b5db6c-29hz8"
Jan 20 16:47:39 crc kubenswrapper[4995]: I0120 16:47:39.942689 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-vvwk8"
Jan 20 16:47:40 crc kubenswrapper[4995]: I0120 16:47:40.013793 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-85dd56d4cc-dxnvv"
Jan 20 16:47:40 crc kubenswrapper[4995]: I0120 16:47:40.074172 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-fdpgr"
Jan 20 16:47:40 crc kubenswrapper[4995]: I0120 16:47:40.260356 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" event={"ID":"de6fc9c2-f9a9-41fd-8cfb-b0493d823c20","Type":"ContainerStarted","Data":"89571a91542f81c82ebf16907ff37bbefa6c234c225f2a99b422d56320d956ed"}
Jan 20 16:47:40 crc kubenswrapper[4995]: I0120 16:47:40.260826 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2"
Jan 20 16:47:40 crc kubenswrapper[4995]: I0120 16:47:40.281373 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2" podStartSLOduration=3.319029831 podStartE2EDuration="41.28135319s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.493969958 +0000 UTC m=+939.738574764" lastFinishedPulling="2026-01-20 16:47:39.456293317 +0000 UTC m=+977.700898123" observedRunningTime="2026-01-20 16:47:40.273921009 +0000 UTC m=+978.518525855" watchObservedRunningTime="2026-01-20 16:47:40.28135319 +0000 UTC m=+978.525958006"
Jan 20 16:47:41 crc kubenswrapper[4995]: I0120 16:47:41.021054 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-zd75z"
Jan 20 16:47:41 crc kubenswrapper[4995]: I0120 16:47:41.267760 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" event={"ID":"31bd181f-39ff-4e9f-949c-8a6ed84f3f42","Type":"ContainerStarted","Data":"6a8caaea0260e1dbe89c937932366bb848d3cb0c3c88846baa0dbd02c3864c99"}
Jan 20 16:47:41 crc kubenswrapper[4995]: I0120 16:47:41.294213 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-jwzhh" podStartSLOduration=2.989088724 podStartE2EDuration="42.294193056s" podCreationTimestamp="2026-01-20 16:46:59 +0000 UTC" firstStartedPulling="2026-01-20 16:47:01.16290227 +0000 UTC m=+939.407507076" lastFinishedPulling="2026-01-20 16:47:40.468006602 +0000 UTC m=+978.712611408" observedRunningTime="2026-01-20 16:47:41.288755059 +0000 UTC m=+979.533359865" watchObservedRunningTime="2026-01-20 16:47:41.294193056 +0000 UTC m=+979.538797862"
Jan 20 16:47:41 crc kubenswrapper[4995]: I0120 16:47:41.701999 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7696897b84-8gt6d"
Jan 20 16:47:41 crc kubenswrapper[4995]: I0120 16:47:41.778309 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9"
Jan 20 16:47:49 crc kubenswrapper[4995]: I0120 16:47:49.907943 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-fk7x2"
Jan 20 16:47:50 crc kubenswrapper[4995]: I0120 16:47:50.008816 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-6rn8v"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.839489 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mvx52"]
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.840938 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.844490 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.844685 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.844780 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.845247 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-6t2d7"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.868450 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mvx52"]
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.916904 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5s5qj"]
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.918491 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.920845 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.932049 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5s5qj"]
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.939358 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97025292-0160-4692-9cf4-e377bd46e1b2-config\") pod \"dnsmasq-dns-675f4bcbfc-mvx52\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:08 crc kubenswrapper[4995]: I0120 16:48:08.939522 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq7p6\" (UniqueName: \"kubernetes.io/projected/97025292-0160-4692-9cf4-e377bd46e1b2-kube-api-access-pq7p6\") pod \"dnsmasq-dns-675f4bcbfc-mvx52\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.041290 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-config\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.041373 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.041425 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4m6b\" (UniqueName: \"kubernetes.io/projected/be18c2c9-a4a5-44ae-963a-97b9d4204529-kube-api-access-v4m6b\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.041454 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97025292-0160-4692-9cf4-e377bd46e1b2-config\") pod \"dnsmasq-dns-675f4bcbfc-mvx52\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.041487 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq7p6\" (UniqueName: \"kubernetes.io/projected/97025292-0160-4692-9cf4-e377bd46e1b2-kube-api-access-pq7p6\") pod \"dnsmasq-dns-675f4bcbfc-mvx52\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.042563 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97025292-0160-4692-9cf4-e377bd46e1b2-config\") pod \"dnsmasq-dns-675f4bcbfc-mvx52\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.068795 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq7p6\" (UniqueName: \"kubernetes.io/projected/97025292-0160-4692-9cf4-e377bd46e1b2-kube-api-access-pq7p6\") pod \"dnsmasq-dns-675f4bcbfc-mvx52\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.142795 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-config\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.142916 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.142950 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4m6b\" (UniqueName: \"kubernetes.io/projected/be18c2c9-a4a5-44ae-963a-97b9d4204529-kube-api-access-v4m6b\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.143736 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-config\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.143755 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.170748 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.171041 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4m6b\" (UniqueName: \"kubernetes.io/projected/be18c2c9-a4a5-44ae-963a-97b9d4204529-kube-api-access-v4m6b\") pod \"dnsmasq-dns-78dd6ddcc-5s5qj\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.247022 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj"
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.540178 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5s5qj"]
Jan 20 16:48:09 crc kubenswrapper[4995]: W0120 16:48:09.542883 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe18c2c9_a4a5_44ae_963a_97b9d4204529.slice/crio-eb025cb21fd3140e06c272a35b672f2b047a1a8b946c43ec94f3028ff1f84660 WatchSource:0}: Error finding container eb025cb21fd3140e06c272a35b672f2b047a1a8b946c43ec94f3028ff1f84660: Status 404 returned error can't find the container with id eb025cb21fd3140e06c272a35b672f2b047a1a8b946c43ec94f3028ff1f84660
Jan 20 16:48:09 crc kubenswrapper[4995]: I0120 16:48:09.622774 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mvx52"]
Jan 20 16:48:09 crc kubenswrapper[4995]: W0120 16:48:09.624104 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97025292_0160_4692_9cf4_e377bd46e1b2.slice/crio-12a80fa8790315de9d49664010ee8244eaacad244c43518f5e8dfb8cf6890e97 WatchSource:0}: Error finding container 12a80fa8790315de9d49664010ee8244eaacad244c43518f5e8dfb8cf6890e97: Status 404 returned error can't find the container with id 12a80fa8790315de9d49664010ee8244eaacad244c43518f5e8dfb8cf6890e97
Jan 20 16:48:10 crc kubenswrapper[4995]: I0120 16:48:10.482529 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj" event={"ID":"be18c2c9-a4a5-44ae-963a-97b9d4204529","Type":"ContainerStarted","Data":"eb025cb21fd3140e06c272a35b672f2b047a1a8b946c43ec94f3028ff1f84660"}
Jan 20 16:48:10 crc kubenswrapper[4995]: I0120 16:48:10.483941 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52" event={"ID":"97025292-0160-4692-9cf4-e377bd46e1b2","Type":"ContainerStarted","Data":"12a80fa8790315de9d49664010ee8244eaacad244c43518f5e8dfb8cf6890e97"}
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.712419 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mvx52"]
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.725893 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-m6tsl"]
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.727592 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.736689 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-m6tsl"]
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.882100 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nqwz\" (UniqueName: \"kubernetes.io/projected/468dedb3-3ad1-4df5-9527-2d9e35f7609e-kube-api-access-7nqwz\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.882184 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.882272 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-config\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.984980 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nqwz\" (UniqueName: \"kubernetes.io/projected/468dedb3-3ad1-4df5-9527-2d9e35f7609e-kube-api-access-7nqwz\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.985121 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.985156 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-config\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.986310 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-config\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:11 crc kubenswrapper[4995]: I0120 16:48:11.986624 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.016749 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nqwz\" (UniqueName: \"kubernetes.io/projected/468dedb3-3ad1-4df5-9527-2d9e35f7609e-kube-api-access-7nqwz\") pod \"dnsmasq-dns-666b6646f7-m6tsl\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.018306 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5s5qj"]
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.053151 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.074153 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-98snw"]
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.076363 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.098293 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-98snw"]
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.188858 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-config\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.188974 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.188997 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xccsc\" (UniqueName: \"kubernetes.io/projected/ab782ff5-9039-481c-92a5-65ffda6ad787-kube-api-access-xccsc\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.290492 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.290532 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xccsc\" (UniqueName: \"kubernetes.io/projected/ab782ff5-9039-481c-92a5-65ffda6ad787-kube-api-access-xccsc\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.290611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-config\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.291696 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-config\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.292214 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.313833 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xccsc\" (UniqueName: \"kubernetes.io/projected/ab782ff5-9039-481c-92a5-65ffda6ad787-kube-api-access-xccsc\") pod \"dnsmasq-dns-57d769cc4f-98snw\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.455297 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-98snw"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.550670 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-m6tsl"]
Jan 20 16:48:12 crc kubenswrapper[4995]: W0120 16:48:12.556772 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod468dedb3_3ad1_4df5_9527_2d9e35f7609e.slice/crio-d8c494b241d6d68d8fc485d5e83d3cb9bc04a2ea240a88cfca49dbc7a4e30486 WatchSource:0}: Error finding container d8c494b241d6d68d8fc485d5e83d3cb9bc04a2ea240a88cfca49dbc7a4e30486: Status 404 returned error can't find the container with id d8c494b241d6d68d8fc485d5e83d3cb9bc04a2ea240a88cfca49dbc7a4e30486
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.896361 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.898685 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.901320 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-nsxr9"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.902447 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.902825 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.903037 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.903415 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.903613 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.904362 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.908508 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-98snw"]
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.916875 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.999897 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-config-data\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:12 crc kubenswrapper[4995]: I0120 16:48:12.999960 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:12.999987 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4055d0be-e174-4fb9-9026-1a0499fe9dc6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000021 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000047 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000063 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sclxt\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-kube-api-access-sclxt\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000107 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000152 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4055d0be-e174-4fb9-9026-1a0499fe9dc6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000176 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000203 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.000219 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101028 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101089 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101108 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sclxt\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-kube-api-access-sclxt\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101160 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101201 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4055d0be-e174-4fb9-9026-1a0499fe9dc6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101220 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101241 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101257 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101277 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-config-data\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101307 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.101328 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4055d0be-e174-4fb9-9026-1a0499fe9dc6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.102517 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.103139 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.103278 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-config-data\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.103436 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.103618 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.104899 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.107506 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4055d0be-e174-4fb9-9026-1a0499fe9dc6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.107532 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.109048 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.109275 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4055d0be-e174-4fb9-9026-1a0499fe9dc6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.121189 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sclxt\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-kube-api-access-sclxt\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.124119 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.178344 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.180529 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.186420 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.186615 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.186939 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.187094 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.187869 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.188000 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.188150 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-cxrpf"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.200647 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.218187 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304287 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304338 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304363 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304514 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/79c459b9-ccad-49a5-b945-64903e2c5308-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304572 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304620 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgpjp\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-kube-api-access-cgpjp\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304650 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304726 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304777 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/79c459b9-ccad-49a5-b945-64903e2c5308-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304858 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.304929 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406589 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406650 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/79c459b9-ccad-49a5-b945-64903e2c5308-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406696 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406735 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406774 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406805 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406834 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406867 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/79c459b9-ccad-49a5-b945-64903e2c5308-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406892 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406904 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.407148 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.407917 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.407995 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.408209 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.406916 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgpjp\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-kube-api-access-cgpjp\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.408520 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.410779 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.410880 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/79c459b9-ccad-49a5-b945-64903e2c5308-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.413162 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.413623 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/79c459b9-ccad-49a5-b945-64903e2c5308-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.426593 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.429481 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.433116 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgpjp\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-kube-api-access-cgpjp\") pod \"rabbitmq-cell1-server-0\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.507563 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:48:13 crc kubenswrapper[4995]: I0120 16:48:13.521208 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" event={"ID":"468dedb3-3ad1-4df5-9527-2d9e35f7609e","Type":"ContainerStarted","Data":"d8c494b241d6d68d8fc485d5e83d3cb9bc04a2ea240a88cfca49dbc7a4e30486"}
Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.261796 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.263457 4995 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.265986 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.266241 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.266344 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.266387 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9nv5w" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.275501 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.286659 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421516 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptgnk\" (UniqueName: \"kubernetes.io/projected/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-kube-api-access-ptgnk\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421628 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-config-data-default\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421670 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421694 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-kolla-config\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421903 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421960 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.421995 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.422094 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.523869 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524257 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524289 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524333 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524368 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptgnk\" (UniqueName: \"kubernetes.io/projected/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-kube-api-access-ptgnk\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524434 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-config-data-default\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524467 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524469 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") device 
mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524491 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-kolla-config\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.524685 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.525110 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-kolla-config\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.525842 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-config-data-default\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.526576 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.535215 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.547838 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.554779 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptgnk\" (UniqueName: \"kubernetes.io/projected/e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6-kube-api-access-ptgnk\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.563581 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6\") " pod="openstack/openstack-galera-0" Jan 20 16:48:14 crc kubenswrapper[4995]: I0120 16:48:14.590377 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.553042 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" event={"ID":"ab782ff5-9039-481c-92a5-65ffda6ad787","Type":"ContainerStarted","Data":"8dd6f65a34a57acc43160cd730afe036bd0ff9db32051daa22da2fd04a3150e5"} Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.619068 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.620936 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.652589 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.652995 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.653347 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.654013 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.654177 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-5zdj8" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756144 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpdf7\" (UniqueName: \"kubernetes.io/projected/a68274bb-aba1-4c92-85ae-8e043d5ac325-kube-api-access-lpdf7\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756209 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a68274bb-aba1-4c92-85ae-8e043d5ac325-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756239 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a68274bb-aba1-4c92-85ae-8e043d5ac325-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756294 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756432 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " 
pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756491 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756544 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68274bb-aba1-4c92-85ae-8e043d5ac325-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.756569 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858201 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858245 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68274bb-aba1-4c92-85ae-8e043d5ac325-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858270 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpdf7\" (UniqueName: \"kubernetes.io/projected/a68274bb-aba1-4c92-85ae-8e043d5ac325-kube-api-access-lpdf7\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858294 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a68274bb-aba1-4c92-85ae-8e043d5ac325-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858312 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a68274bb-aba1-4c92-85ae-8e043d5ac325-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858353 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " 
pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858396 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.858426 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.859096 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.859330 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.859400 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.859694 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/a68274bb-aba1-4c92-85ae-8e043d5ac325-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.861682 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a68274bb-aba1-4c92-85ae-8e043d5ac325-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.863171 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/a68274bb-aba1-4c92-85ae-8e043d5ac325-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.866744 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68274bb-aba1-4c92-85ae-8e043d5ac325-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.878988 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpdf7\" (UniqueName: \"kubernetes.io/projected/a68274bb-aba1-4c92-85ae-8e043d5ac325-kube-api-access-lpdf7\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.892643 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"a68274bb-aba1-4c92-85ae-8e043d5ac325\") " pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.950772 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.951766 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.954662 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.954940 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 20 16:48:15 crc kubenswrapper[4995]: I0120 16:48:15.955063 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-5lgnk" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:15.975238 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:15.975561 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.060582 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f951f50c-486d-4038-a43a-4d40fa1812de-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.060622 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f951f50c-486d-4038-a43a-4d40fa1812de-kolla-config\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.060650 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f951f50c-486d-4038-a43a-4d40fa1812de-config-data\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.060667 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qv7nf\" (UniqueName: \"kubernetes.io/projected/f951f50c-486d-4038-a43a-4d40fa1812de-kube-api-access-qv7nf\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.060829 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f951f50c-486d-4038-a43a-4d40fa1812de-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.162362 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f951f50c-486d-4038-a43a-4d40fa1812de-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.162419 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f951f50c-486d-4038-a43a-4d40fa1812de-kolla-config\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.162456 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f951f50c-486d-4038-a43a-4d40fa1812de-config-data\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.162475 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qv7nf\" (UniqueName: \"kubernetes.io/projected/f951f50c-486d-4038-a43a-4d40fa1812de-kube-api-access-qv7nf\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.162536 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f951f50c-486d-4038-a43a-4d40fa1812de-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.164213 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f951f50c-486d-4038-a43a-4d40fa1812de-kolla-config\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.166036 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f951f50c-486d-4038-a43a-4d40fa1812de-config-data\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.166994 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f951f50c-486d-4038-a43a-4d40fa1812de-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.176995 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f951f50c-486d-4038-a43a-4d40fa1812de-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.179022 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qv7nf\" (UniqueName: 
\"kubernetes.io/projected/f951f50c-486d-4038-a43a-4d40fa1812de-kube-api-access-qv7nf\") pod \"memcached-0\" (UID: \"f951f50c-486d-4038-a43a-4d40fa1812de\") " pod="openstack/memcached-0" Jan 20 16:48:16 crc kubenswrapper[4995]: I0120 16:48:16.269538 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.068545 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.070152 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.071927 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-4m5jf" Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.088945 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.192624 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwzhs\" (UniqueName: \"kubernetes.io/projected/1650b6ff-e786-4244-bb71-5611bc85fa90-kube-api-access-vwzhs\") pod \"kube-state-metrics-0\" (UID: \"1650b6ff-e786-4244-bb71-5611bc85fa90\") " pod="openstack/kube-state-metrics-0" Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.294314 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwzhs\" (UniqueName: \"kubernetes.io/projected/1650b6ff-e786-4244-bb71-5611bc85fa90-kube-api-access-vwzhs\") pod \"kube-state-metrics-0\" (UID: \"1650b6ff-e786-4244-bb71-5611bc85fa90\") " pod="openstack/kube-state-metrics-0" Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.338901 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwzhs\" (UniqueName: \"kubernetes.io/projected/1650b6ff-e786-4244-bb71-5611bc85fa90-kube-api-access-vwzhs\") pod \"kube-state-metrics-0\" (UID: \"1650b6ff-e786-4244-bb71-5611bc85fa90\") " pod="openstack/kube-state-metrics-0" Jan 20 16:48:18 crc kubenswrapper[4995]: I0120 16:48:18.399235 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.394480 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.400046 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403224 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403265 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403304 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403244 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403447 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403510 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.403730 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-58l4k" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.411427 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.420849 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514020 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514088 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514289 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514347 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlp5p\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-kube-api-access-zlp5p\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"web-config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514430 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514551 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514592 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514679 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.514748 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615714 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615756 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615785 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615814 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615847 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615872 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615916 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615934 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlp5p\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-kube-api-access-zlp5p\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615954 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.615974 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.616504 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.616609 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: 
\"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.617105 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.619018 4995 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.619053 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a3a36306f7d3d2f24937466925b0b10e100df05e864ec7bc951230e86c72f354/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.619764 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.619929 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.624216 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.626386 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.627580 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.633754 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlp5p\" (UniqueName: 
\"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-kube-api-access-zlp5p\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.668222 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") " pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:19 crc kubenswrapper[4995]: I0120 16:48:19.723818 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.172700 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-spc7x"] Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.174141 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.177431 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.178071 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.178676 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-fpf9q" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.182824 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-spc7x"] Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.191592 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-q9nkf"] Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.194320 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.210655 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-q9nkf"] Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241345 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/54be3683-2d75-43fd-8301-e05b2a5103cc-ovn-controller-tls-certs\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54be3683-2d75-43fd-8301-e05b2a5103cc-scripts\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241423 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54be3683-2d75-43fd-8301-e05b2a5103cc-combined-ca-bundle\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241443 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-run-ovn\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241491 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-run\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241512 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtc2z\" (UniqueName: \"kubernetes.io/projected/54be3683-2d75-43fd-8301-e05b2a5103cc-kube-api-access-jtc2z\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.241553 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-log-ovn\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343124 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-run\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343199 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtc2z\" (UniqueName: 
\"kubernetes.io/projected/54be3683-2d75-43fd-8301-e05b2a5103cc-kube-api-access-jtc2z\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343256 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-log-ovn\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343279 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-etc-ovs\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343314 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97t6j\" (UniqueName: \"kubernetes.io/projected/028bd686-8a70-4866-968f-c29ab470e44c-kube-api-access-97t6j\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343331 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-run\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343348 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-lib\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343363 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/028bd686-8a70-4866-968f-c29ab470e44c-scripts\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343389 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/54be3683-2d75-43fd-8301-e05b2a5103cc-ovn-controller-tls-certs\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343468 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54be3683-2d75-43fd-8301-e05b2a5103cc-scripts\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343534 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-log\") pod 
\"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343566 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54be3683-2d75-43fd-8301-e05b2a5103cc-combined-ca-bundle\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-run-ovn\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343814 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-log-ovn\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.343944 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-run-ovn\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.346245 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/54be3683-2d75-43fd-8301-e05b2a5103cc-scripts\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.346684 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/54be3683-2d75-43fd-8301-e05b2a5103cc-var-run\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.350931 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54be3683-2d75-43fd-8301-e05b2a5103cc-combined-ca-bundle\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.361924 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/54be3683-2d75-43fd-8301-e05b2a5103cc-ovn-controller-tls-certs\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.371481 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtc2z\" (UniqueName: \"kubernetes.io/projected/54be3683-2d75-43fd-8301-e05b2a5103cc-kube-api-access-jtc2z\") pod \"ovn-controller-spc7x\" (UID: \"54be3683-2d75-43fd-8301-e05b2a5103cc\") " pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.444757 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-etc-ovs\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.444818 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97t6j\" (UniqueName: \"kubernetes.io/projected/028bd686-8a70-4866-968f-c29ab470e44c-kube-api-access-97t6j\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.444835 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-run\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.444854 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-lib\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.444871 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/028bd686-8a70-4866-968f-c29ab470e44c-scripts\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.444897 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-log\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.445469 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-etc-ovs\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.445629 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-lib\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.445711 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-run\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.446845 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/028bd686-8a70-4866-968f-c29ab470e44c-var-log\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc 
kubenswrapper[4995]: I0120 16:48:21.448359 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/028bd686-8a70-4866-968f-c29ab470e44c-scripts\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.460051 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97t6j\" (UniqueName: \"kubernetes.io/projected/028bd686-8a70-4866-968f-c29ab470e44c-kube-api-access-97t6j\") pod \"ovn-controller-ovs-q9nkf\" (UID: \"028bd686-8a70-4866-968f-c29ab470e44c\") " pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.503133 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.518503 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.726513 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.736353 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.740391 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.740641 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.740742 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.740970 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.741800 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tswjj" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.745217 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.853830 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-config\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.853874 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.853914 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 
16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.854071 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwh64\" (UniqueName: \"kubernetes.io/projected/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-kube-api-access-qwh64\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.854147 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.854238 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.854283 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.854312 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955207 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwh64\" (UniqueName: \"kubernetes.io/projected/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-kube-api-access-qwh64\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955245 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955290 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955318 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955347 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955405 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-config\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955420 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955446 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955713 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.955765 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.959669 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.960001 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.960239 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.960407 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.968993 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-config\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.969951 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-scripts\") pod \"ovsdbserver-nb-0\" (UID: 
\"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.970376 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.973821 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.974362 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.975858 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwh64\" (UniqueName: \"kubernetes.io/projected/929cb9df-f5d9-4b0b-972c-5b79b6e28ab8-kube-api-access-qwh64\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:21 crc kubenswrapper[4995]: I0120 16:48:21.977514 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8\") " pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:22 crc kubenswrapper[4995]: I0120 16:48:22.067896 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-tswjj" Jan 20 16:48:22 crc kubenswrapper[4995]: I0120 16:48:22.077232 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 20 16:48:24 crc kubenswrapper[4995]: I0120 16:48:24.335252 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 20 16:48:24 crc kubenswrapper[4995]: W0120 16:48:24.835119 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda68274bb_aba1_4c92_85ae_8e043d5ac325.slice/crio-473f05ea1d9bd7ddbeae1033fcecc55b4dae9a8c5e34d1bf0075eefbff744afc WatchSource:0}: Error finding container 473f05ea1d9bd7ddbeae1033fcecc55b4dae9a8c5e34d1bf0075eefbff744afc: Status 404 returned error can't find the container with id 473f05ea1d9bd7ddbeae1033fcecc55b4dae9a8c5e34d1bf0075eefbff744afc Jan 20 16:48:24 crc kubenswrapper[4995]: E0120 16:48:24.854263 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 20 16:48:24 crc kubenswrapper[4995]: E0120 16:48:24.854443 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v4m6b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-5s5qj_openstack(be18c2c9-a4a5-44ae-963a-97b9d4204529): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:48:24 crc kubenswrapper[4995]: E0120 16:48:24.855671 4995 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj" podUID="be18c2c9-a4a5-44ae-963a-97b9d4204529" Jan 20 16:48:24 crc kubenswrapper[4995]: E0120 16:48:24.864354 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 20 16:48:24 crc kubenswrapper[4995]: E0120 16:48:24.864536 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pq7p6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-mvx52_openstack(97025292-0160-4692-9cf4-e377bd46e1b2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:48:24 crc kubenswrapper[4995]: E0120 16:48:24.867385 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52" podUID="97025292-0160-4692-9cf4-e377bd46e1b2" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.369939 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.374538 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.378129 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-9sbxz" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.378512 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.378703 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.378991 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.394682 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.442177 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.457417 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-spc7x"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.464873 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522121 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522194 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522278 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522304 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522338 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522374 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvrjc\" (UniqueName: 
\"kubernetes.io/projected/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-kube-api-access-kvrjc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522399 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-config\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.522426 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.549045 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624526 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624610 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624679 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624732 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624768 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624804 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624839 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvrjc\" (UniqueName: 
\"kubernetes.io/projected/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-kube-api-access-kvrjc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.624865 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-config\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.625174 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.626045 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.626637 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-config\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.627057 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.630233 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.636560 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.637835 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.662334 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a68274bb-aba1-4c92-85ae-8e043d5ac325","Type":"ContainerStarted","Data":"473f05ea1d9bd7ddbeae1033fcecc55b4dae9a8c5e34d1bf0075eefbff744afc"} Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.667995 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvrjc\" 
(UniqueName: \"kubernetes.io/projected/9283bc9e-66ee-4ded-b64e-3bdca7f112b4-kube-api-access-kvrjc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.672415 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4055d0be-e174-4fb9-9026-1a0499fe9dc6","Type":"ContainerStarted","Data":"040d368f0bb56ae1400105700fdcdbf596c8829ca86ebbb8d7f8a2b672a29b53"} Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.677581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x" event={"ID":"54be3683-2d75-43fd-8301-e05b2a5103cc","Type":"ContainerStarted","Data":"f164e16f877cf067c67d12e4b0a0b82f9959f4ab802b2a8932aac6533161e0a6"} Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.683610 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1650b6ff-e786-4244-bb71-5611bc85fa90","Type":"ContainerStarted","Data":"d45bf6dc5d45302cd6c6c7c1457831ec63c15dee3af457398a44c2fe2bac6b97"} Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.690053 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"9283bc9e-66ee-4ded-b64e-3bdca7f112b4\") " pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.698144 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.708292 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.710267 4995 generic.go:334] "Generic (PLEG): container finished" podID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerID="3e8f76a740240c9db8dd747b7487628464d0e37f1aaabfc79a603881ca6b9153" exitCode=0 Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.710368 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" event={"ID":"468dedb3-3ad1-4df5-9527-2d9e35f7609e","Type":"ContainerDied","Data":"3e8f76a740240c9db8dd747b7487628464d0e37f1aaabfc79a603881ca6b9153"} Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.715978 4995 generic.go:334] "Generic (PLEG): container finished" podID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerID="a6061c25a59c9970aa2cac30ea29289da62fcea0d72ab35e39071b5d1deadc1a" exitCode=0 Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.716064 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" event={"ID":"ab782ff5-9039-481c-92a5-65ffda6ad787","Type":"ContainerDied","Data":"a6061c25a59c9970aa2cac30ea29289da62fcea0d72ab35e39071b5d1deadc1a"} Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.716922 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.718827 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"79c459b9-ccad-49a5-b945-64903e2c5308","Type":"ContainerStarted","Data":"2abf40b6d8836fc2d50620d52df63f67070bd8552605ee01760a0ee284703e59"} Jan 20 16:48:25 crc kubenswrapper[4995]: W0120 16:48:25.722834 4995 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod028bd686_8a70_4866_968f_c29ab470e44c.slice/crio-e24209bfbe1dfe9df7fd19ad0790d5b674c16f610e24572c672184cc68274cca WatchSource:0}: Error finding container e24209bfbe1dfe9df7fd19ad0790d5b674c16f610e24572c672184cc68274cca: Status 404 returned error can't find the container with id e24209bfbe1dfe9df7fd19ad0790d5b674c16f610e24572c672184cc68274cca Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.733713 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.746375 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-q9nkf"] Jan 20 16:48:25 crc kubenswrapper[4995]: I0120 16:48:25.821656 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.072671 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.137002 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-dns-svc\") pod \"be18c2c9-a4a5-44ae-963a-97b9d4204529\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.137216 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-config\") pod \"be18c2c9-a4a5-44ae-963a-97b9d4204529\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.137377 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4m6b\" (UniqueName: \"kubernetes.io/projected/be18c2c9-a4a5-44ae-963a-97b9d4204529-kube-api-access-v4m6b\") pod \"be18c2c9-a4a5-44ae-963a-97b9d4204529\" (UID: \"be18c2c9-a4a5-44ae-963a-97b9d4204529\") " Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.138426 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-config" (OuterVolumeSpecName: "config") pod "be18c2c9-a4a5-44ae-963a-97b9d4204529" (UID: "be18c2c9-a4a5-44ae-963a-97b9d4204529"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.138450 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "be18c2c9-a4a5-44ae-963a-97b9d4204529" (UID: "be18c2c9-a4a5-44ae-963a-97b9d4204529"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.142640 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be18c2c9-a4a5-44ae-963a-97b9d4204529-kube-api-access-v4m6b" (OuterVolumeSpecName: "kube-api-access-v4m6b") pod "be18c2c9-a4a5-44ae-963a-97b9d4204529" (UID: "be18c2c9-a4a5-44ae-963a-97b9d4204529"). InnerVolumeSpecName "kube-api-access-v4m6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.237137 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.238837 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4m6b\" (UniqueName: \"kubernetes.io/projected/be18c2c9-a4a5-44ae-963a-97b9d4204529-kube-api-access-v4m6b\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.238854 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.238865 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be18c2c9-a4a5-44ae-963a-97b9d4204529-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.316930 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.339928 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pq7p6\" (UniqueName: \"kubernetes.io/projected/97025292-0160-4692-9cf4-e377bd46e1b2-kube-api-access-pq7p6\") pod \"97025292-0160-4692-9cf4-e377bd46e1b2\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.339963 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97025292-0160-4692-9cf4-e377bd46e1b2-config\") pod \"97025292-0160-4692-9cf4-e377bd46e1b2\" (UID: \"97025292-0160-4692-9cf4-e377bd46e1b2\") " Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.346947 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97025292-0160-4692-9cf4-e377bd46e1b2-config" (OuterVolumeSpecName: "config") pod "97025292-0160-4692-9cf4-e377bd46e1b2" (UID: "97025292-0160-4692-9cf4-e377bd46e1b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.357259 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97025292-0160-4692-9cf4-e377bd46e1b2-kube-api-access-pq7p6" (OuterVolumeSpecName: "kube-api-access-pq7p6") pod "97025292-0160-4692-9cf4-e377bd46e1b2" (UID: "97025292-0160-4692-9cf4-e377bd46e1b2"). InnerVolumeSpecName "kube-api-access-pq7p6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.442024 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pq7p6\" (UniqueName: \"kubernetes.io/projected/97025292-0160-4692-9cf4-e377bd46e1b2-kube-api-access-pq7p6\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.442055 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/97025292-0160-4692-9cf4-e377bd46e1b2-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:26 crc kubenswrapper[4995]: W0120 16:48:26.583658 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9283bc9e_66ee_4ded_b64e_3bdca7f112b4.slice/crio-f7eaf413ffc6a2afcb27702b53609d3e7bb4f6b4d6b050b986acdb5c86dac3f8 WatchSource:0}: Error finding container f7eaf413ffc6a2afcb27702b53609d3e7bb4f6b4d6b050b986acdb5c86dac3f8: Status 404 returned error can't find the container with id f7eaf413ffc6a2afcb27702b53609d3e7bb4f6b4d6b050b986acdb5c86dac3f8 Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.740229 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-vfjkw"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.741974 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.744853 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.747652 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-vfjkw"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.751762 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52" event={"ID":"97025292-0160-4692-9cf4-e377bd46e1b2","Type":"ContainerDied","Data":"12a80fa8790315de9d49664010ee8244eaacad244c43518f5e8dfb8cf6890e97"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.751823 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-mvx52" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.759657 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6","Type":"ContainerStarted","Data":"4e6506f32b32aa85f311e8c3b9c0344e195d42f4b01b72a10a646a138f44cf1d"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.764797 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f951f50c-486d-4038-a43a-4d40fa1812de","Type":"ContainerStarted","Data":"868beeeeb9fc6c48fb20822b0a7b0cf406d8c1a71b26fa7dfc16501763cf5226"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.768886 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q9nkf" event={"ID":"028bd686-8a70-4866-968f-c29ab470e44c","Type":"ContainerStarted","Data":"e24209bfbe1dfe9df7fd19ad0790d5b674c16f610e24572c672184cc68274cca"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.774843 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" event={"ID":"ab782ff5-9039-481c-92a5-65ffda6ad787","Type":"ContainerStarted","Data":"0314be62914cd157793a1b8088197c721883349ce6126d06c6f9b692b2ea435d"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.776661 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.800734 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" event={"ID":"468dedb3-3ad1-4df5-9527-2d9e35f7609e","Type":"ContainerStarted","Data":"fa8cfdd998f115c90cf6e66d185e06ea337de4eeb9ac2537290fca7574de5942"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.801027 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.801981 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj" event={"ID":"be18c2c9-a4a5-44ae-963a-97b9d4204529","Type":"ContainerDied","Data":"eb025cb21fd3140e06c272a35b672f2b047a1a8b946c43ec94f3028ff1f84660"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.802049 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5s5qj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.805406 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" podStartSLOduration=5.218124392 podStartE2EDuration="14.805243659s" podCreationTimestamp="2026-01-20 16:48:12 +0000 UTC" firstStartedPulling="2026-01-20 16:48:15.366134811 +0000 UTC m=+1013.610739617" lastFinishedPulling="2026-01-20 16:48:24.953254088 +0000 UTC m=+1023.197858884" observedRunningTime="2026-01-20 16:48:26.791663501 +0000 UTC m=+1025.036268307" watchObservedRunningTime="2026-01-20 16:48:26.805243659 +0000 UTC m=+1025.049848465" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.812649 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerStarted","Data":"a18665aca8896dbe98c6ebad2216d9532b6f5e3f2f2c257305344d38743787b6"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.826650 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9283bc9e-66ee-4ded-b64e-3bdca7f112b4","Type":"ContainerStarted","Data":"f7eaf413ffc6a2afcb27702b53609d3e7bb4f6b4d6b050b986acdb5c86dac3f8"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.833482 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8","Type":"ContainerStarted","Data":"c851e6ec8a2bd137e5e0f9699a5f51b00507b1a37b8eb8b5d23c4ea1b5dbc18d"} Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.836766 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" podStartSLOduration=3.457459039 podStartE2EDuration="15.836746853s" podCreationTimestamp="2026-01-20 16:48:11 +0000 UTC" firstStartedPulling="2026-01-20 16:48:12.56385554 +0000 UTC m=+1010.808460346" lastFinishedPulling="2026-01-20 16:48:24.943143354 +0000 UTC m=+1023.187748160" observedRunningTime="2026-01-20 16:48:26.826922817 +0000 UTC m=+1025.071527623" watchObservedRunningTime="2026-01-20 16:48:26.836746853 +0000 UTC m=+1025.081351659" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.848557 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5e655fe2-263f-4d77-b9fd-af0528012527-ovs-rundir\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.848634 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e655fe2-263f-4d77-b9fd-af0528012527-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.848691 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e655fe2-263f-4d77-b9fd-af0528012527-combined-ca-bundle\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.848749 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8j6kp\" (UniqueName: \"kubernetes.io/projected/5e655fe2-263f-4d77-b9fd-af0528012527-kube-api-access-8j6kp\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.848770 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5e655fe2-263f-4d77-b9fd-af0528012527-ovn-rundir\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.848787 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e655fe2-263f-4d77-b9fd-af0528012527-config\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.883126 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-98snw"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.908158 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mvx52"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.911174 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-mvx52"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.917700 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-6r8cj"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.931863 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.938999 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.950491 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-6r8cj"] Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.965935 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-config\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966170 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966265 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e655fe2-263f-4d77-b9fd-af0528012527-combined-ca-bundle\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966449 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8j6kp\" (UniqueName: \"kubernetes.io/projected/5e655fe2-263f-4d77-b9fd-af0528012527-kube-api-access-8j6kp\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966559 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5e655fe2-263f-4d77-b9fd-af0528012527-ovn-rundir\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966649 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e655fe2-263f-4d77-b9fd-af0528012527-config\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966728 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs2xj\" (UniqueName: \"kubernetes.io/projected/35de482a-a071-43c9-b69b-531bdb674894-kube-api-access-zs2xj\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966841 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " 
pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.966997 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5e655fe2-263f-4d77-b9fd-af0528012527-ovs-rundir\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.967161 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e655fe2-263f-4d77-b9fd-af0528012527-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:26 crc kubenswrapper[4995]: I0120 16:48:26.967907 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/5e655fe2-263f-4d77-b9fd-af0528012527-ovn-rundir\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.019308 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5e655fe2-263f-4d77-b9fd-af0528012527-config\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.019523 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/5e655fe2-263f-4d77-b9fd-af0528012527-ovs-rundir\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.029978 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e655fe2-263f-4d77-b9fd-af0528012527-combined-ca-bundle\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.041825 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8j6kp\" (UniqueName: \"kubernetes.io/projected/5e655fe2-263f-4d77-b9fd-af0528012527-kube-api-access-8j6kp\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.067334 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e655fe2-263f-4d77-b9fd-af0528012527-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-vfjkw\" (UID: \"5e655fe2-263f-4d77-b9fd-af0528012527\") " pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.075929 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-config\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.075980 
4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.076154 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs2xj\" (UniqueName: \"kubernetes.io/projected/35de482a-a071-43c9-b69b-531bdb674894-kube-api-access-zs2xj\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.076211 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.076565 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-vfjkw" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.078446 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.079979 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-config\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.093420 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.119748 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5s5qj"] Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.132135 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5s5qj"] Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.132345 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs2xj\" (UniqueName: \"kubernetes.io/projected/35de482a-a071-43c9-b69b-531bdb674894-kube-api-access-zs2xj\") pod \"dnsmasq-dns-7fd796d7df-6r8cj\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.162276 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-m6tsl"] Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.171179 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-dvx7h"] Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.172752 4995 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.177016 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.178134 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-dvx7h"] Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.268422 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.280599 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-config\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.280640 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.280682 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.280832 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.281063 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcd2x\" (UniqueName: \"kubernetes.io/projected/471d34ec-7dff-4d2c-92c6-3319ac7db16e-kube-api-access-mcd2x\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.382858 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcd2x\" (UniqueName: \"kubernetes.io/projected/471d34ec-7dff-4d2c-92c6-3319ac7db16e-kube-api-access-mcd2x\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.382926 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-config\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.382947 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.382981 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.383012 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.384064 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.384808 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-config\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.385318 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.385780 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.412706 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcd2x\" (UniqueName: \"kubernetes.io/projected/471d34ec-7dff-4d2c-92c6-3319ac7db16e-kube-api-access-mcd2x\") pod \"dnsmasq-dns-86db49b7ff-dvx7h\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.502852 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.999034 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97025292-0160-4692-9cf4-e377bd46e1b2" path="/var/lib/kubelet/pods/97025292-0160-4692-9cf4-e377bd46e1b2/volumes" Jan 20 16:48:27 crc kubenswrapper[4995]: I0120 16:48:27.999640 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be18c2c9-a4a5-44ae-963a-97b9d4204529" path="/var/lib/kubelet/pods/be18c2c9-a4a5-44ae-963a-97b9d4204529/volumes" Jan 20 16:48:28 crc kubenswrapper[4995]: I0120 16:48:28.099557 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-vfjkw"] Jan 20 16:48:28 crc kubenswrapper[4995]: I0120 16:48:28.850498 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="dnsmasq-dns" containerID="cri-o://fa8cfdd998f115c90cf6e66d185e06ea337de4eeb9ac2537290fca7574de5942" gracePeriod=10 Jan 20 16:48:28 crc kubenswrapper[4995]: I0120 16:48:28.850569 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="dnsmasq-dns" containerID="cri-o://0314be62914cd157793a1b8088197c721883349ce6126d06c6f9b692b2ea435d" gracePeriod=10 Jan 20 16:48:29 crc kubenswrapper[4995]: W0120 16:48:29.022648 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e655fe2_263f_4d77_b9fd_af0528012527.slice/crio-52f81a089099e866b6392928ebf04250e359b44449648822e657405ebbce3449 WatchSource:0}: Error finding container 52f81a089099e866b6392928ebf04250e359b44449648822e657405ebbce3449: Status 404 returned error can't find the container with id 52f81a089099e866b6392928ebf04250e359b44449648822e657405ebbce3449 Jan 20 16:48:29 crc kubenswrapper[4995]: I0120 16:48:29.483205 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-6r8cj"] Jan 20 16:48:29 crc kubenswrapper[4995]: I0120 16:48:29.863618 4995 generic.go:334] "Generic (PLEG): container finished" podID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerID="fa8cfdd998f115c90cf6e66d185e06ea337de4eeb9ac2537290fca7574de5942" exitCode=0 Jan 20 16:48:29 crc kubenswrapper[4995]: I0120 16:48:29.863713 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" event={"ID":"468dedb3-3ad1-4df5-9527-2d9e35f7609e","Type":"ContainerDied","Data":"fa8cfdd998f115c90cf6e66d185e06ea337de4eeb9ac2537290fca7574de5942"} Jan 20 16:48:29 crc kubenswrapper[4995]: I0120 16:48:29.866746 4995 generic.go:334] "Generic (PLEG): container finished" podID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerID="0314be62914cd157793a1b8088197c721883349ce6126d06c6f9b692b2ea435d" exitCode=0 Jan 20 16:48:29 crc kubenswrapper[4995]: I0120 16:48:29.866887 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" event={"ID":"ab782ff5-9039-481c-92a5-65ffda6ad787","Type":"ContainerDied","Data":"0314be62914cd157793a1b8088197c721883349ce6126d06c6f9b692b2ea435d"} Jan 20 16:48:29 crc kubenswrapper[4995]: I0120 16:48:29.869602 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-vfjkw" 
event={"ID":"5e655fe2-263f-4d77-b9fd-af0528012527","Type":"ContainerStarted","Data":"52f81a089099e866b6392928ebf04250e359b44449648822e657405ebbce3449"} Jan 20 16:48:32 crc kubenswrapper[4995]: I0120 16:48:32.458494 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.104:5353: connect: connection refused" Jan 20 16:48:32 crc kubenswrapper[4995]: I0120 16:48:32.995967 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.080518 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-dns-svc\") pod \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.080565 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nqwz\" (UniqueName: \"kubernetes.io/projected/468dedb3-3ad1-4df5-9527-2d9e35f7609e-kube-api-access-7nqwz\") pod \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.080630 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-config\") pod \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\" (UID: \"468dedb3-3ad1-4df5-9527-2d9e35f7609e\") " Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.092349 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/468dedb3-3ad1-4df5-9527-2d9e35f7609e-kube-api-access-7nqwz" (OuterVolumeSpecName: "kube-api-access-7nqwz") pod "468dedb3-3ad1-4df5-9527-2d9e35f7609e" (UID: "468dedb3-3ad1-4df5-9527-2d9e35f7609e"). InnerVolumeSpecName "kube-api-access-7nqwz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.116047 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "468dedb3-3ad1-4df5-9527-2d9e35f7609e" (UID: "468dedb3-3ad1-4df5-9527-2d9e35f7609e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.120113 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-config" (OuterVolumeSpecName: "config") pod "468dedb3-3ad1-4df5-9527-2d9e35f7609e" (UID: "468dedb3-3ad1-4df5-9527-2d9e35f7609e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.184907 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.184938 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nqwz\" (UniqueName: \"kubernetes.io/projected/468dedb3-3ad1-4df5-9527-2d9e35f7609e-kube-api-access-7nqwz\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.184950 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468dedb3-3ad1-4df5-9527-2d9e35f7609e-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:33 crc kubenswrapper[4995]: W0120 16:48:33.231571 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35de482a_a071_43c9_b69b_531bdb674894.slice/crio-e3d33ee2e9b27da732bc4b964b5eed162870af897c44585aa429b6e848cf4033 WatchSource:0}: Error finding container e3d33ee2e9b27da732bc4b964b5eed162870af897c44585aa429b6e848cf4033: Status 404 returned error can't find the container with id e3d33ee2e9b27da732bc4b964b5eed162870af897c44585aa429b6e848cf4033 Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.343533 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.387832 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xccsc\" (UniqueName: \"kubernetes.io/projected/ab782ff5-9039-481c-92a5-65ffda6ad787-kube-api-access-xccsc\") pod \"ab782ff5-9039-481c-92a5-65ffda6ad787\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.387977 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-dns-svc\") pod \"ab782ff5-9039-481c-92a5-65ffda6ad787\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.388037 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-config\") pod \"ab782ff5-9039-481c-92a5-65ffda6ad787\" (UID: \"ab782ff5-9039-481c-92a5-65ffda6ad787\") " Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.391421 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab782ff5-9039-481c-92a5-65ffda6ad787-kube-api-access-xccsc" (OuterVolumeSpecName: "kube-api-access-xccsc") pod "ab782ff5-9039-481c-92a5-65ffda6ad787" (UID: "ab782ff5-9039-481c-92a5-65ffda6ad787"). InnerVolumeSpecName "kube-api-access-xccsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.441980 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ab782ff5-9039-481c-92a5-65ffda6ad787" (UID: "ab782ff5-9039-481c-92a5-65ffda6ad787"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.452031 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-config" (OuterVolumeSpecName: "config") pod "ab782ff5-9039-481c-92a5-65ffda6ad787" (UID: "ab782ff5-9039-481c-92a5-65ffda6ad787"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.490713 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.490749 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab782ff5-9039-481c-92a5-65ffda6ad787-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.490760 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xccsc\" (UniqueName: \"kubernetes.io/projected/ab782ff5-9039-481c-92a5-65ffda6ad787-kube-api-access-xccsc\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.902655 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" event={"ID":"468dedb3-3ad1-4df5-9527-2d9e35f7609e","Type":"ContainerDied","Data":"d8c494b241d6d68d8fc485d5e83d3cb9bc04a2ea240a88cfca49dbc7a4e30486"} Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.903050 4995 scope.go:117] "RemoveContainer" containerID="fa8cfdd998f115c90cf6e66d185e06ea337de4eeb9ac2537290fca7574de5942" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.902733 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.905250 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" event={"ID":"35de482a-a071-43c9-b69b-531bdb674894","Type":"ContainerStarted","Data":"e3d33ee2e9b27da732bc4b964b5eed162870af897c44585aa429b6e848cf4033"} Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.907035 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" event={"ID":"ab782ff5-9039-481c-92a5-65ffda6ad787","Type":"ContainerDied","Data":"8dd6f65a34a57acc43160cd730afe036bd0ff9db32051daa22da2fd04a3150e5"} Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.907145 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-98snw" Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.939543 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-m6tsl"] Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.948347 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-m6tsl"] Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.953732 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-98snw"] Jan 20 16:48:33 crc kubenswrapper[4995]: I0120 16:48:33.959541 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-98snw"] Jan 20 16:48:34 crc kubenswrapper[4995]: I0120 16:48:34.004029 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" path="/var/lib/kubelet/pods/468dedb3-3ad1-4df5-9527-2d9e35f7609e/volumes" Jan 20 16:48:34 crc kubenswrapper[4995]: I0120 16:48:34.004640 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" path="/var/lib/kubelet/pods/ab782ff5-9039-481c-92a5-65ffda6ad787/volumes" Jan 20 16:48:34 crc kubenswrapper[4995]: I0120 16:48:34.882348 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-dvx7h"] Jan 20 16:48:35 crc kubenswrapper[4995]: W0120 16:48:35.537773 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod471d34ec_7dff_4d2c_92c6_3319ac7db16e.slice/crio-9ffa2b6e8649787566c5a76f2bd0c2d54624972255e689030145332e519685ac WatchSource:0}: Error finding container 9ffa2b6e8649787566c5a76f2bd0c2d54624972255e689030145332e519685ac: Status 404 returned error can't find the container with id 9ffa2b6e8649787566c5a76f2bd0c2d54624972255e689030145332e519685ac Jan 20 16:48:35 crc kubenswrapper[4995]: I0120 16:48:35.780225 4995 scope.go:117] "RemoveContainer" containerID="3e8f76a740240c9db8dd747b7487628464d0e37f1aaabfc79a603881ca6b9153" Jan 20 16:48:35 crc kubenswrapper[4995]: I0120 16:48:35.924445 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" event={"ID":"471d34ec-7dff-4d2c-92c6-3319ac7db16e","Type":"ContainerStarted","Data":"9ffa2b6e8649787566c5a76f2bd0c2d54624972255e689030145332e519685ac"} Jan 20 16:48:35 crc kubenswrapper[4995]: I0120 16:48:35.984919 4995 scope.go:117] "RemoveContainer" containerID="0314be62914cd157793a1b8088197c721883349ce6126d06c6f9b692b2ea435d" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.056571 4995 scope.go:117] "RemoveContainer" containerID="a6061c25a59c9970aa2cac30ea29289da62fcea0d72ab35e39071b5d1deadc1a" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.943729 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1650b6ff-e786-4244-bb71-5611bc85fa90","Type":"ContainerStarted","Data":"d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.944381 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.951138 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6","Type":"ContainerStarted","Data":"83d627eed7cbbafb29d57a5d4fe692c39405137012d4107d50eba3938ec1abe9"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.953105 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f951f50c-486d-4038-a43a-4d40fa1812de","Type":"ContainerStarted","Data":"c6e5e8a68a905e4f00b5693aa9e1e2e94fc9190681b4ac19662d522054fc49e3"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.953410 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.966906 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=8.464828618 podStartE2EDuration="18.966886957s" podCreationTimestamp="2026-01-20 16:48:18 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.554547903 +0000 UTC m=+1023.799152719" lastFinishedPulling="2026-01-20 16:48:36.056606252 +0000 UTC m=+1034.301211058" observedRunningTime="2026-01-20 16:48:36.964672516 +0000 UTC m=+1035.209277332" watchObservedRunningTime="2026-01-20 16:48:36.966886957 +0000 UTC m=+1035.211491763" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.977513 4995 generic.go:334] "Generic (PLEG): container finished" podID="35de482a-a071-43c9-b69b-531bdb674894" containerID="bca0b5ae6a8e20b937afd169a29f21547f2397d92f6c811869f25809f8050d8d" exitCode=0 Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.977565 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" event={"ID":"35de482a-a071-43c9-b69b-531bdb674894","Type":"ContainerDied","Data":"bca0b5ae6a8e20b937afd169a29f21547f2397d92f6c811869f25809f8050d8d"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.982532 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a68274bb-aba1-4c92-85ae-8e043d5ac325","Type":"ContainerStarted","Data":"dff6f5cadbcd0920536da7364d0a4f1eb968e02fed5ebc5b4a46a2c312ab7584"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.984789 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-spc7x" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.987185 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=12.225730771 podStartE2EDuration="21.987166686s" podCreationTimestamp="2026-01-20 16:48:15 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.70891261 +0000 UTC m=+1023.953517416" lastFinishedPulling="2026-01-20 16:48:35.470348485 +0000 UTC m=+1033.714953331" observedRunningTime="2026-01-20 16:48:36.980350102 +0000 UTC m=+1035.224954918" watchObservedRunningTime="2026-01-20 16:48:36.987166686 +0000 UTC m=+1035.231771492" Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.988094 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9283bc9e-66ee-4ded-b64e-3bdca7f112b4","Type":"ContainerStarted","Data":"e48692c438273273f1a7b23dcb45715a034af4bb12c89f8a87a0f73d03a15bd2"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.989864 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8","Type":"ContainerStarted","Data":"6024b5348a88db952da0fa60703b8403012c23c18c8c494a1a540e9bdfbbda0b"} Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 
Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.992631 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q9nkf" event={"ID":"028bd686-8a70-4866-968f-c29ab470e44c","Type":"ContainerStarted","Data":"35ba839a8a765a2c5ea6ce9e7ccf020ebd2f27b1efa4929c4b7ea9487db18be0"}
Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.995191 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-vfjkw" event={"ID":"5e655fe2-263f-4d77-b9fd-af0528012527","Type":"ContainerStarted","Data":"c201c1b36814b6593c7bb3fdcb5f0274f928a08e90e22eecc6b659efc3ed1435"}
Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.996389 4995 generic.go:334] "Generic (PLEG): container finished" podID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerID="ab4295ff69aa5bc6204ffd09a2cc1d1efb33782fbf1061e6a5b406cf880cdff6" exitCode=0
Jan 20 16:48:36 crc kubenswrapper[4995]: I0120 16:48:36.996441 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" event={"ID":"471d34ec-7dff-4d2c-92c6-3319ac7db16e","Type":"ContainerDied","Data":"ab4295ff69aa5bc6204ffd09a2cc1d1efb33782fbf1061e6a5b406cf880cdff6"}
Jan 20 16:48:37 crc kubenswrapper[4995]: I0120 16:48:37.056865 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-666b6646f7-m6tsl" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.103:5353: i/o timeout"
Jan 20 16:48:37 crc kubenswrapper[4995]: I0120 16:48:37.066799 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-spc7x" podStartSLOduration=5.550970334 podStartE2EDuration="16.066776796s" podCreationTimestamp="2026-01-20 16:48:21 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.453507333 +0000 UTC m=+1023.698112129" lastFinishedPulling="2026-01-20 16:48:35.969313765 +0000 UTC m=+1034.213918591" observedRunningTime="2026-01-20 16:48:37.046716182 +0000 UTC m=+1035.291320988" watchObservedRunningTime="2026-01-20 16:48:37.066776796 +0000 UTC m=+1035.311381602"
Jan 20 16:48:37 crc kubenswrapper[4995]: I0120 16:48:37.091365 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-vfjkw" podStartSLOduration=3.997478053 podStartE2EDuration="11.091345422s" podCreationTimestamp="2026-01-20 16:48:26 +0000 UTC" firstStartedPulling="2026-01-20 16:48:29.041976303 +0000 UTC m=+1027.286581119" lastFinishedPulling="2026-01-20 16:48:36.135843682 +0000 UTC m=+1034.380448488" observedRunningTime="2026-01-20 16:48:37.083709975 +0000 UTC m=+1035.328314791" watchObservedRunningTime="2026-01-20 16:48:37.091345422 +0000 UTC m=+1035.335950228"
Jan 20 16:48:38 crc kubenswrapper[4995]: I0120 16:48:38.006477 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x" event={"ID":"54be3683-2d75-43fd-8301-e05b2a5103cc","Type":"ContainerStarted","Data":"34ad142a09280707ed9424182fc5df3e27f911bcb9081388621f883bebf4e396"}
Jan 20 16:48:38 crc kubenswrapper[4995]: I0120 16:48:38.008609 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"929cb9df-f5d9-4b0b-972c-5b79b6e28ab8","Type":"ContainerStarted","Data":"57891d45d2a192f3f0b63b5b077ec58517535c1e9ff589829d69b927cf5650d7"}
Jan 20 16:48:38 crc kubenswrapper[4995]: I0120 16:48:38.010181 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"79c459b9-ccad-49a5-b945-64903e2c5308","Type":"ContainerStarted","Data":"f9924f8ceedb006a1a3a2d00d1ed358cfb77191ca6fd9c24966a0a177abff2a8"}
Jan 20 16:48:38 crc kubenswrapper[4995]: I0120 16:48:38.013316 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4055d0be-e174-4fb9-9026-1a0499fe9dc6","Type":"ContainerStarted","Data":"e43764df4b82f7c03cd027fee5d4c77391d8c5774ce51d750a84dc6225286250"}
Jan 20 16:48:38 crc kubenswrapper[4995]: I0120 16:48:38.037191 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=7.892868571 podStartE2EDuration="18.03716923s" podCreationTimestamp="2026-01-20 16:48:20 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.823461605 +0000 UTC m=+1024.068066421" lastFinishedPulling="2026-01-20 16:48:35.967762264 +0000 UTC m=+1034.212367080" observedRunningTime="2026-01-20 16:48:38.028989048 +0000 UTC m=+1036.273593854" watchObservedRunningTime="2026-01-20 16:48:38.03716923 +0000 UTC m=+1036.281774036"
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.023006 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"9283bc9e-66ee-4ded-b64e-3bdca7f112b4","Type":"ContainerStarted","Data":"1cb891c3c388203170d71b42fcf3cda6f2c0838c914701c4dbb59facff831709"}
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.026014 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" event={"ID":"35de482a-a071-43c9-b69b-531bdb674894","Type":"ContainerStarted","Data":"658690bded7942e018398577bb0a53ad1d9922d1592d28ab9df61b3da01cbac9"}
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.026468 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj"
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.027537 4995 generic.go:334] "Generic (PLEG): container finished" podID="028bd686-8a70-4866-968f-c29ab470e44c" containerID="35ba839a8a765a2c5ea6ce9e7ccf020ebd2f27b1efa4929c4b7ea9487db18be0" exitCode=0
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.027590 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q9nkf" event={"ID":"028bd686-8a70-4866-968f-c29ab470e44c","Type":"ContainerDied","Data":"35ba839a8a765a2c5ea6ce9e7ccf020ebd2f27b1efa4929c4b7ea9487db18be0"}
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.034623 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" event={"ID":"471d34ec-7dff-4d2c-92c6-3319ac7db16e","Type":"ContainerStarted","Data":"3ef0f6e2aecd5826069e89b5ecdf35890338dd8c7605b369c7b20287bc57bd0a"}
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.034654 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h"
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.048089 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=5.66133952 podStartE2EDuration="15.048055863s" podCreationTimestamp="2026-01-20 16:48:24 +0000 UTC" firstStartedPulling="2026-01-20 16:48:26.586037105 +0000 UTC m=+1024.830641911" lastFinishedPulling="2026-01-20 16:48:35.972753428 +0000 UTC m=+1034.217358254" observedRunningTime="2026-01-20 16:48:39.046359847 +0000 UTC m=+1037.290964653" watchObservedRunningTime="2026-01-20 16:48:39.048055863 +0000 UTC m=+1037.292660659"
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.080673 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" podStartSLOduration=12.080658147 podStartE2EDuration="12.080658147s" podCreationTimestamp="2026-01-20 16:48:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:48:39.075816156 +0000 UTC m=+1037.320420972" watchObservedRunningTime="2026-01-20 16:48:39.080658147 +0000 UTC m=+1037.325262953"
Jan 20 16:48:39 crc kubenswrapper[4995]: I0120 16:48:39.132087 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" podStartSLOduration=13.132054911 podStartE2EDuration="13.132054911s" podCreationTimestamp="2026-01-20 16:48:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:48:39.120338803 +0000 UTC m=+1037.364943649" watchObservedRunningTime="2026-01-20 16:48:39.132054911 +0000 UTC m=+1037.376659707"
Jan 20 16:48:40 crc kubenswrapper[4995]: I0120 16:48:40.077551 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Jan 20 16:48:40 crc kubenswrapper[4995]: I0120 16:48:40.135194 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Jan 20 16:48:40 crc kubenswrapper[4995]: I0120 16:48:40.709628 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Jan 20 16:48:40 crc kubenswrapper[4995]: I0120 16:48:40.709978 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Jan 20 16:48:40 crc kubenswrapper[4995]: I0120 16:48:40.749758 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.054465 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerStarted","Data":"d84b9b127c5fc1c34bfdadf3f2dc3ede4fd463384fdb78adf81799fb76219c66"}
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.058558 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q9nkf" event={"ID":"028bd686-8a70-4866-968f-c29ab470e44c","Type":"ContainerStarted","Data":"043372b6ce5624f5b9ebbb8da4dd4f109f614b3a9be03c778189fd6ec7cc0928"}
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.059122 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.102895 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.103560 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.271747 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.575712 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Jan 20 16:48:41 crc kubenswrapper[4995]: E0120 16:48:41.576378 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="init"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.576399 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="init"
Jan 20 16:48:41 crc kubenswrapper[4995]: E0120 16:48:41.576441 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="dnsmasq-dns"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.576449 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="dnsmasq-dns"
Jan 20 16:48:41 crc kubenswrapper[4995]: E0120 16:48:41.576471 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="init"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.576478 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="init"
Jan 20 16:48:41 crc kubenswrapper[4995]: E0120 16:48:41.576493 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="dnsmasq-dns"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.576500 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="dnsmasq-dns"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.576691 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="468dedb3-3ad1-4df5-9527-2d9e35f7609e" containerName="dnsmasq-dns"
Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.576712 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab782ff5-9039-481c-92a5-65ffda6ad787" containerName="dnsmasq-dns"
Need to start a new one" pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.580784 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-gxklf" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.580911 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.583613 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.584113 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.642412 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649241 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649305 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649376 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c79dd22d-f0b7-4102-a740-1e5c88a5a548-config\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649484 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c79dd22d-f0b7-4102-a740-1e5c88a5a548-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649636 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c79dd22d-f0b7-4102-a740-1e5c88a5a548-scripts\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649747 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhbb7\" (UniqueName: \"kubernetes.io/projected/c79dd22d-f0b7-4102-a740-1e5c88a5a548-kube-api-access-lhbb7\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.649809 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: 
I0120 16:48:41.751738 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c79dd22d-f0b7-4102-a740-1e5c88a5a548-scripts\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.751819 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhbb7\" (UniqueName: \"kubernetes.io/projected/c79dd22d-f0b7-4102-a740-1e5c88a5a548-kube-api-access-lhbb7\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.751878 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.751927 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.751967 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.752015 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c79dd22d-f0b7-4102-a740-1e5c88a5a548-config\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.752045 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c79dd22d-f0b7-4102-a740-1e5c88a5a548-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.752493 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c79dd22d-f0b7-4102-a740-1e5c88a5a548-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.752691 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c79dd22d-f0b7-4102-a740-1e5c88a5a548-scripts\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.752857 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c79dd22d-f0b7-4102-a740-1e5c88a5a548-config\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.758888 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.761988 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.772860 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhbb7\" (UniqueName: \"kubernetes.io/projected/c79dd22d-f0b7-4102-a740-1e5c88a5a548-kube-api-access-lhbb7\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.775737 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c79dd22d-f0b7-4102-a740-1e5c88a5a548-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c79dd22d-f0b7-4102-a740-1e5c88a5a548\") " pod="openstack/ovn-northd-0" Jan 20 16:48:41 crc kubenswrapper[4995]: I0120 16:48:41.904156 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.067598 4995 generic.go:334] "Generic (PLEG): container finished" podID="a68274bb-aba1-4c92-85ae-8e043d5ac325" containerID="dff6f5cadbcd0920536da7364d0a4f1eb968e02fed5ebc5b4a46a2c312ab7584" exitCode=0 Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.067684 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a68274bb-aba1-4c92-85ae-8e043d5ac325","Type":"ContainerDied","Data":"dff6f5cadbcd0920536da7364d0a4f1eb968e02fed5ebc5b4a46a2c312ab7584"} Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.070298 4995 generic.go:334] "Generic (PLEG): container finished" podID="e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6" containerID="83d627eed7cbbafb29d57a5d4fe692c39405137012d4107d50eba3938ec1abe9" exitCode=0 Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.070365 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6","Type":"ContainerDied","Data":"83d627eed7cbbafb29d57a5d4fe692c39405137012d4107d50eba3938ec1abe9"} Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.081211 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-q9nkf" event={"ID":"028bd686-8a70-4866-968f-c29ab470e44c","Type":"ContainerStarted","Data":"445ccd4f5128d36cbe669d782e3bcc5c2f54b03e8d3bd1682fca8cb30e501646"} Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.107679 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-q9nkf" podStartSLOduration=11.054986083 podStartE2EDuration="21.107655446s" podCreationTimestamp="2026-01-20 16:48:21 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.727561015 +0000 UTC m=+1023.972165831" lastFinishedPulling="2026-01-20 16:48:35.780230388 +0000 UTC m=+1034.024835194" observedRunningTime="2026-01-20 16:48:42.106512295 +0000 UTC m=+1040.351117111" watchObservedRunningTime="2026-01-20 
16:48:42.107655446 +0000 UTC m=+1040.352260252" Jan 20 16:48:42 crc kubenswrapper[4995]: I0120 16:48:42.349413 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.090769 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c79dd22d-f0b7-4102-a740-1e5c88a5a548","Type":"ContainerStarted","Data":"2709b60bb044d7a63b1319b7563e85650e2241cad5231e7d74a2dee6d31f7f7a"} Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.092762 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6","Type":"ContainerStarted","Data":"4f686d114149785d7069d084b25d7ce97b068b430b8e3af5b61f6af72f7dec7a"} Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.097677 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"a68274bb-aba1-4c92-85ae-8e043d5ac325","Type":"ContainerStarted","Data":"49e1c57e189b8f3df2e44888121f2cbd738ad9acb25df4920d7d9399a0ddfc41"} Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.098294 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.098325 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-q9nkf" Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.119482 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=19.876734197 podStartE2EDuration="30.119459174s" podCreationTimestamp="2026-01-20 16:48:13 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.725015146 +0000 UTC m=+1023.969619952" lastFinishedPulling="2026-01-20 16:48:35.967740123 +0000 UTC m=+1034.212344929" observedRunningTime="2026-01-20 16:48:43.116160584 +0000 UTC m=+1041.360765400" watchObservedRunningTime="2026-01-20 16:48:43.119459174 +0000 UTC m=+1041.364063980" Jan 20 16:48:43 crc kubenswrapper[4995]: I0120 16:48:43.146879 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=18.250201857 podStartE2EDuration="29.146856776s" podCreationTimestamp="2026-01-20 16:48:14 +0000 UTC" firstStartedPulling="2026-01-20 16:48:24.88361271 +0000 UTC m=+1023.128217516" lastFinishedPulling="2026-01-20 16:48:35.780267639 +0000 UTC m=+1034.024872435" observedRunningTime="2026-01-20 16:48:43.136968468 +0000 UTC m=+1041.381573304" watchObservedRunningTime="2026-01-20 16:48:43.146856776 +0000 UTC m=+1041.391461572" Jan 20 16:48:44 crc kubenswrapper[4995]: I0120 16:48:44.591728 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 20 16:48:44 crc kubenswrapper[4995]: I0120 16:48:44.592350 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 20 16:48:45 crc kubenswrapper[4995]: I0120 16:48:45.115818 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c79dd22d-f0b7-4102-a740-1e5c88a5a548","Type":"ContainerStarted","Data":"7c32c4a6e5ef78cf436478c8f0f73698e425b49abfe68df0d151789054733562"} Jan 20 16:48:45 crc kubenswrapper[4995]: I0120 16:48:45.976600 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:45 crc 
kubenswrapper[4995]: I0120 16:48:45.976659 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:47 crc kubenswrapper[4995]: I0120 16:48:47.270426 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:47 crc kubenswrapper[4995]: I0120 16:48:47.504317 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:47 crc kubenswrapper[4995]: I0120 16:48:47.562114 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-6r8cj"] Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.138014 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c79dd22d-f0b7-4102-a740-1e5c88a5a548","Type":"ContainerStarted","Data":"f02c74edb7f70b6220caf77ecd767ce7c559d58ce85a8b9a67f4cac809cde0f2"} Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.138199 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" podUID="35de482a-a071-43c9-b69b-531bdb674894" containerName="dnsmasq-dns" containerID="cri-o://658690bded7942e018398577bb0a53ad1d9922d1592d28ab9df61b3da01cbac9" gracePeriod=10 Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.415713 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.451648 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-2z2z5"] Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.453513 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.482665 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2z2z5"] Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.580556 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-dns-svc\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.580619 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tszf\" (UniqueName: \"kubernetes.io/projected/c030f925-c98d-4500-bcff-340a978d5fbc-kube-api-access-7tszf\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.580762 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.580804 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.580838 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-config\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.682481 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.682534 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.682553 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-config\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.682587 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-dns-svc\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.682658 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tszf\" (UniqueName: \"kubernetes.io/projected/c030f925-c98d-4500-bcff-340a978d5fbc-kube-api-access-7tszf\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.683422 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.683422 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-config\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.683677 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-dns-svc\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.683945 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.700192 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tszf\" (UniqueName: \"kubernetes.io/projected/c030f925-c98d-4500-bcff-340a978d5fbc-kube-api-access-7tszf\") pod \"dnsmasq-dns-698758b865-2z2z5\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:48 crc kubenswrapper[4995]: I0120 16:48:48.838783 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.145552 4995 generic.go:334] "Generic (PLEG): container finished" podID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerID="d84b9b127c5fc1c34bfdadf3f2dc3ede4fd463384fdb78adf81799fb76219c66" exitCode=0 Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.145635 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerDied","Data":"d84b9b127c5fc1c34bfdadf3f2dc3ede4fd463384fdb78adf81799fb76219c66"} Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.148594 4995 generic.go:334] "Generic (PLEG): container finished" podID="35de482a-a071-43c9-b69b-531bdb674894" containerID="658690bded7942e018398577bb0a53ad1d9922d1592d28ab9df61b3da01cbac9" exitCode=0 Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.148640 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" event={"ID":"35de482a-a071-43c9-b69b-531bdb674894","Type":"ContainerDied","Data":"658690bded7942e018398577bb0a53ad1d9922d1592d28ab9df61b3da01cbac9"} Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.601654 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.619243 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.621873 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.622228 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.622244 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-vbblk" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.622545 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.624349 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.662058 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2z2z5"] Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.697634 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/3f11d1ef-8720-4a15-91b7-2ad1602194f7-lock\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.697704 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpcgj\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-kube-api-access-vpcgj\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.697724 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" 
(UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.697788 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/3f11d1ef-8720-4a15-91b7-2ad1602194f7-cache\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.697804 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.799518 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/3f11d1ef-8720-4a15-91b7-2ad1602194f7-lock\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800049 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpcgj\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-kube-api-access-vpcgj\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800171 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800061 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/3f11d1ef-8720-4a15-91b7-2ad1602194f7-lock\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800365 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/3f11d1ef-8720-4a15-91b7-2ad1602194f7-cache\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800441 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: E0120 16:48:49.800406 4995 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 16:48:49 crc kubenswrapper[4995]: E0120 16:48:49.800528 4995 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 16:48:49 crc kubenswrapper[4995]: E0120 16:48:49.800604 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift podName:3f11d1ef-8720-4a15-91b7-2ad1602194f7 nodeName:}" failed. 
No retries permitted until 2026-01-20 16:48:50.300581356 +0000 UTC m=+1048.545186162 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift") pod "swift-storage-0" (UID: "3f11d1ef-8720-4a15-91b7-2ad1602194f7") : configmap "swift-ring-files" not found Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800739 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/3f11d1ef-8720-4a15-91b7-2ad1602194f7-cache\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.800770 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.816490 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpcgj\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-kube-api-access-vpcgj\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:49 crc kubenswrapper[4995]: I0120 16:48:49.821470 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.086930 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-nmjp6"] Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.087988 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.091747 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.094052 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.095516 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.096215 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-nmjp6"] Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.157120 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2z2z5" event={"ID":"c030f925-c98d-4500-bcff-340a978d5fbc","Type":"ContainerStarted","Data":"c3664fd129bc9a39d8b7b4c54a9273ebe10a37e40a5d7818c41784d341b61a18"} Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.222785 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f955d94a-612b-4962-9745-ac012f2398b2-etc-swift\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.222883 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-combined-ca-bundle\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.222912 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-scripts\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.222950 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-ring-data-devices\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.223091 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zklg9\" (UniqueName: \"kubernetes.io/projected/f955d94a-612b-4962-9745-ac012f2398b2-kube-api-access-zklg9\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.223116 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-dispersionconf\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.223162 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-swiftconf\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.325013 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zklg9\" (UniqueName: \"kubernetes.io/projected/f955d94a-612b-4962-9745-ac012f2398b2-kube-api-access-zklg9\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.325164 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-dispersionconf\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.325829 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-swiftconf\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.326232 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f955d94a-612b-4962-9745-ac012f2398b2-etc-swift\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.326355 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-combined-ca-bundle\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.326426 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-scripts\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.326590 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-ring-data-devices\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.327013 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f955d94a-612b-4962-9745-ac012f2398b2-etc-swift\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: E0120 16:48:50.327694 4995 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 
16:48:50 crc kubenswrapper[4995]: E0120 16:48:50.327770 4995 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 16:48:50 crc kubenswrapper[4995]: E0120 16:48:50.327866 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift podName:3f11d1ef-8720-4a15-91b7-2ad1602194f7 nodeName:}" failed. No retries permitted until 2026-01-20 16:48:51.327851764 +0000 UTC m=+1049.572456570 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift") pod "swift-storage-0" (UID: "3f11d1ef-8720-4a15-91b7-2ad1602194f7") : configmap "swift-ring-files" not found Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.327899 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-scripts\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.326997 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.329777 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-dispersionconf\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.330437 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-swiftconf\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.332241 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-ring-data-devices\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.332579 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-combined-ca-bundle\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.347319 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zklg9\" (UniqueName: \"kubernetes.io/projected/f955d94a-612b-4962-9745-ac012f2398b2-kube-api-access-zklg9\") pod \"swift-ring-rebalance-nmjp6\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.420749 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:48:50 crc kubenswrapper[4995]: I0120 16:48:50.866494 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-nmjp6"] Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.059865 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.143741 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-dns-svc\") pod \"35de482a-a071-43c9-b69b-531bdb674894\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.143820 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-config\") pod \"35de482a-a071-43c9-b69b-531bdb674894\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.143877 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-ovsdbserver-nb\") pod \"35de482a-a071-43c9-b69b-531bdb674894\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.143979 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zs2xj\" (UniqueName: \"kubernetes.io/projected/35de482a-a071-43c9-b69b-531bdb674894-kube-api-access-zs2xj\") pod \"35de482a-a071-43c9-b69b-531bdb674894\" (UID: \"35de482a-a071-43c9-b69b-531bdb674894\") " Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.149387 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35de482a-a071-43c9-b69b-531bdb674894-kube-api-access-zs2xj" (OuterVolumeSpecName: "kube-api-access-zs2xj") pod "35de482a-a071-43c9-b69b-531bdb674894" (UID: "35de482a-a071-43c9-b69b-531bdb674894"). InnerVolumeSpecName "kube-api-access-zs2xj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.167450 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" event={"ID":"35de482a-a071-43c9-b69b-531bdb674894","Type":"ContainerDied","Data":"e3d33ee2e9b27da732bc4b964b5eed162870af897c44585aa429b6e848cf4033"} Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.167763 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-6r8cj" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.167912 4995 scope.go:117] "RemoveContainer" containerID="658690bded7942e018398577bb0a53ad1d9922d1592d28ab9df61b3da01cbac9" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.169477 4995 generic.go:334] "Generic (PLEG): container finished" podID="c030f925-c98d-4500-bcff-340a978d5fbc" containerID="866a9da7e7f8b897690d43357405ee102251cf38d557814b2a885fed1f550de7" exitCode=0 Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.169557 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2z2z5" event={"ID":"c030f925-c98d-4500-bcff-340a978d5fbc","Type":"ContainerDied","Data":"866a9da7e7f8b897690d43357405ee102251cf38d557814b2a885fed1f550de7"} Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.173038 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nmjp6" event={"ID":"f955d94a-612b-4962-9745-ac012f2398b2","Type":"ContainerStarted","Data":"92885a3cf2aa61b9e4286d84d0ef9624229b0da92a62d85f76cbfcd7b20eee2b"} Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.173292 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.204869 4995 scope.go:117] "RemoveContainer" containerID="bca0b5ae6a8e20b937afd169a29f21547f2397d92f6c811869f25809f8050d8d" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.213414 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "35de482a-a071-43c9-b69b-531bdb674894" (UID: "35de482a-a071-43c9-b69b-531bdb674894"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.223516 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-config" (OuterVolumeSpecName: "config") pod "35de482a-a071-43c9-b69b-531bdb674894" (UID: "35de482a-a071-43c9-b69b-531bdb674894"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.225207 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "35de482a-a071-43c9-b69b-531bdb674894" (UID: "35de482a-a071-43c9-b69b-531bdb674894"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.236831 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=8.636732786 podStartE2EDuration="10.236806044s" podCreationTimestamp="2026-01-20 16:48:41 +0000 UTC" firstStartedPulling="2026-01-20 16:48:42.366688056 +0000 UTC m=+1040.611292862" lastFinishedPulling="2026-01-20 16:48:43.966761314 +0000 UTC m=+1042.211366120" observedRunningTime="2026-01-20 16:48:51.21673257 +0000 UTC m=+1049.461337376" watchObservedRunningTime="2026-01-20 16:48:51.236806044 +0000 UTC m=+1049.481410880" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.246477 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.246514 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.246524 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/35de482a-a071-43c9-b69b-531bdb674894-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.246535 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zs2xj\" (UniqueName: \"kubernetes.io/projected/35de482a-a071-43c9-b69b-531bdb674894-kube-api-access-zs2xj\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.347590 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:51 crc kubenswrapper[4995]: E0120 16:48:51.347797 4995 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 16:48:51 crc kubenswrapper[4995]: E0120 16:48:51.347826 4995 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 16:48:51 crc kubenswrapper[4995]: E0120 16:48:51.347893 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift podName:3f11d1ef-8720-4a15-91b7-2ad1602194f7 nodeName:}" failed. No retries permitted until 2026-01-20 16:48:53.347871514 +0000 UTC m=+1051.592476320 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift") pod "swift-storage-0" (UID: "3f11d1ef-8720-4a15-91b7-2ad1602194f7") : configmap "swift-ring-files" not found Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.507924 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-6r8cj"] Jan 20 16:48:51 crc kubenswrapper[4995]: I0120 16:48:51.513602 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-6r8cj"] Jan 20 16:48:52 crc kubenswrapper[4995]: I0120 16:48:52.003566 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35de482a-a071-43c9-b69b-531bdb674894" path="/var/lib/kubelet/pods/35de482a-a071-43c9-b69b-531bdb674894/volumes" Jan 20 16:48:52 crc kubenswrapper[4995]: I0120 16:48:52.182999 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2z2z5" event={"ID":"c030f925-c98d-4500-bcff-340a978d5fbc","Type":"ContainerStarted","Data":"3a72c3fa3e75610ecc6e0f011ed410818051290cd5579ebfd79ef48c6e033505"} Jan 20 16:48:52 crc kubenswrapper[4995]: I0120 16:48:52.183061 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:52 crc kubenswrapper[4995]: I0120 16:48:52.208268 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-2z2z5" podStartSLOduration=4.208245878 podStartE2EDuration="4.208245878s" podCreationTimestamp="2026-01-20 16:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:48:52.198229966 +0000 UTC m=+1050.442834772" watchObservedRunningTime="2026-01-20 16:48:52.208245878 +0000 UTC m=+1050.452850694" Jan 20 16:48:53 crc kubenswrapper[4995]: I0120 16:48:53.204509 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 20 16:48:53 crc kubenswrapper[4995]: I0120 16:48:53.277735 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 20 16:48:53 crc kubenswrapper[4995]: I0120 16:48:53.395847 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:53 crc kubenswrapper[4995]: E0120 16:48:53.395986 4995 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 16:48:53 crc kubenswrapper[4995]: E0120 16:48:53.396001 4995 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 16:48:53 crc kubenswrapper[4995]: E0120 16:48:53.396047 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift podName:3f11d1ef-8720-4a15-91b7-2ad1602194f7 nodeName:}" failed. No retries permitted until 2026-01-20 16:48:57.396034034 +0000 UTC m=+1055.640638840 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift") pod "swift-storage-0" (UID: "3f11d1ef-8720-4a15-91b7-2ad1602194f7") : configmap "swift-ring-files" not found Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.928429 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-195b-account-create-update-v2pzx"] Jan 20 16:48:55 crc kubenswrapper[4995]: E0120 16:48:55.929313 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35de482a-a071-43c9-b69b-531bdb674894" containerName="init" Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.929330 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="35de482a-a071-43c9-b69b-531bdb674894" containerName="init" Jan 20 16:48:55 crc kubenswrapper[4995]: E0120 16:48:55.929362 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35de482a-a071-43c9-b69b-531bdb674894" containerName="dnsmasq-dns" Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.929369 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="35de482a-a071-43c9-b69b-531bdb674894" containerName="dnsmasq-dns" Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.929576 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="35de482a-a071-43c9-b69b-531bdb674894" containerName="dnsmasq-dns" Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.930227 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.932675 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 20 16:48:55 crc kubenswrapper[4995]: I0120 16:48:55.954739 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-195b-account-create-update-v2pzx"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.026225 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-j56cw"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.028477 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.047229 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d22gb\" (UniqueName: \"kubernetes.io/projected/51b9bb44-7353-47df-995b-88a44aed4e12-kube-api-access-d22gb\") pod \"keystone-195b-account-create-update-v2pzx\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.047366 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b9bb44-7353-47df-995b-88a44aed4e12-operator-scripts\") pod \"keystone-195b-account-create-update-v2pzx\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.058954 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-j56cw"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.095932 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.148736 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d22gb\" (UniqueName: \"kubernetes.io/projected/51b9bb44-7353-47df-995b-88a44aed4e12-kube-api-access-d22gb\") pod \"keystone-195b-account-create-update-v2pzx\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.148803 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b9bb44-7353-47df-995b-88a44aed4e12-operator-scripts\") pod \"keystone-195b-account-create-update-v2pzx\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.148891 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzrz5\" (UniqueName: \"kubernetes.io/projected/30229159-0c2b-429a-997f-647d3398832f-kube-api-access-lzrz5\") pod \"keystone-db-create-j56cw\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.148953 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30229159-0c2b-429a-997f-647d3398832f-operator-scripts\") pod \"keystone-db-create-j56cw\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.149963 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b9bb44-7353-47df-995b-88a44aed4e12-operator-scripts\") pod \"keystone-195b-account-create-update-v2pzx\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.169320 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d22gb\" (UniqueName: 
\"kubernetes.io/projected/51b9bb44-7353-47df-995b-88a44aed4e12-kube-api-access-d22gb\") pod \"keystone-195b-account-create-update-v2pzx\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.176771 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.245718 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-72pdn"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.247439 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.248351 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.250812 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzrz5\" (UniqueName: \"kubernetes.io/projected/30229159-0c2b-429a-997f-647d3398832f-kube-api-access-lzrz5\") pod \"keystone-db-create-j56cw\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.250921 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30229159-0c2b-429a-997f-647d3398832f-operator-scripts\") pod \"keystone-db-create-j56cw\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.251455 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-77d5-account-create-update-pws59"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.252448 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.252961 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30229159-0c2b-429a-997f-647d3398832f-operator-scripts\") pod \"keystone-db-create-j56cw\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.260133 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.260660 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-77d5-account-create-update-pws59"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.274704 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-72pdn"] Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.285109 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzrz5\" (UniqueName: \"kubernetes.io/projected/30229159-0c2b-429a-997f-647d3398832f-kube-api-access-lzrz5\") pod \"keystone-db-create-j56cw\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.345236 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-j56cw" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.352710 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8806da75-604e-462a-a582-bd0446c83f09-operator-scripts\") pod \"placement-77d5-account-create-update-pws59\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.352795 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5njdl\" (UniqueName: \"kubernetes.io/projected/8eceda26-e546-4f1c-b611-18056b30d199-kube-api-access-5njdl\") pod \"placement-db-create-72pdn\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.352823 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwcd6\" (UniqueName: \"kubernetes.io/projected/8806da75-604e-462a-a582-bd0446c83f09-kube-api-access-fwcd6\") pod \"placement-77d5-account-create-update-pws59\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.352853 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eceda26-e546-4f1c-b611-18056b30d199-operator-scripts\") pod \"placement-db-create-72pdn\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.454950 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8806da75-604e-462a-a582-bd0446c83f09-operator-scripts\") pod \"placement-77d5-account-create-update-pws59\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.455013 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5njdl\" (UniqueName: \"kubernetes.io/projected/8eceda26-e546-4f1c-b611-18056b30d199-kube-api-access-5njdl\") pod \"placement-db-create-72pdn\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.455035 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwcd6\" (UniqueName: \"kubernetes.io/projected/8806da75-604e-462a-a582-bd0446c83f09-kube-api-access-fwcd6\") pod \"placement-77d5-account-create-update-pws59\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.455062 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eceda26-e546-4f1c-b611-18056b30d199-operator-scripts\") pod \"placement-db-create-72pdn\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.455763 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8806da75-604e-462a-a582-bd0446c83f09-operator-scripts\") pod \"placement-77d5-account-create-update-pws59\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.455780 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eceda26-e546-4f1c-b611-18056b30d199-operator-scripts\") pod \"placement-db-create-72pdn\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.471071 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5njdl\" (UniqueName: \"kubernetes.io/projected/8eceda26-e546-4f1c-b611-18056b30d199-kube-api-access-5njdl\") pod \"placement-db-create-72pdn\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.472718 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwcd6\" (UniqueName: \"kubernetes.io/projected/8806da75-604e-462a-a582-bd0446c83f09-kube-api-access-fwcd6\") pod \"placement-77d5-account-create-update-pws59\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.578133 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-72pdn" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.584659 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:48:56 crc kubenswrapper[4995]: I0120 16:48:56.962008 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 20 16:48:57 crc kubenswrapper[4995]: I0120 16:48:57.466693 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:48:57 crc kubenswrapper[4995]: E0120 16:48:57.466893 4995 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 16:48:57 crc kubenswrapper[4995]: E0120 16:48:57.467133 4995 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 16:48:57 crc kubenswrapper[4995]: E0120 16:48:57.467184 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift podName:3f11d1ef-8720-4a15-91b7-2ad1602194f7 nodeName:}" failed. No retries permitted until 2026-01-20 16:49:05.467166794 +0000 UTC m=+1063.711771600 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift") pod "swift-storage-0" (UID: "3f11d1ef-8720-4a15-91b7-2ad1602194f7") : configmap "swift-ring-files" not found Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.480957 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-sv4j2"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.483970 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.507849 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-sv4j2"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.519193 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-7759-account-create-update-mf5qn"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.520638 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.522721 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.534779 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-7759-account-create-update-mf5qn"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.573059 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-195b-account-create-update-v2pzx"] Jan 20 16:48:58 crc kubenswrapper[4995]: W0120 16:48:58.576040 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51b9bb44_7353_47df_995b_88a44aed4e12.slice/crio-451b348021745d4842f45131ddcb29aabb548579e1ea8d6a8213b5f2cef42abd WatchSource:0}: Error finding container 451b348021745d4842f45131ddcb29aabb548579e1ea8d6a8213b5f2cef42abd: Status 404 returned error can't find the container with id 451b348021745d4842f45131ddcb29aabb548579e1ea8d6a8213b5f2cef42abd Jan 20 16:48:58 crc kubenswrapper[4995]: W0120 16:48:58.581279 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30229159_0c2b_429a_997f_647d3398832f.slice/crio-7096da38df10e4d47c61dc11ef60aea6d8b40268491cac1b548400cdf3d66781 WatchSource:0}: Error finding container 7096da38df10e4d47c61dc11ef60aea6d8b40268491cac1b548400cdf3d66781: Status 404 returned error can't find the container with id 7096da38df10e4d47c61dc11ef60aea6d8b40268491cac1b548400cdf3d66781 Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.583251 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-j56cw"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.586525 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9127f5ea-7402-4155-b70f-2a4d382598ec-operator-scripts\") pod \"watcher-db-create-sv4j2\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.586611 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vf7s6\" (UniqueName: 
\"kubernetes.io/projected/9127f5ea-7402-4155-b70f-2a4d382598ec-kube-api-access-vf7s6\") pod \"watcher-db-create-sv4j2\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.678335 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-77d5-account-create-update-pws59"] Jan 20 16:48:58 crc kubenswrapper[4995]: W0120 16:48:58.684597 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8806da75_604e_462a_a582_bd0446c83f09.slice/crio-2d601fd6c36cd7a4dc1cd78bbfd51e634144ae8a2a58b8f3b3fad02890fd0dec WatchSource:0}: Error finding container 2d601fd6c36cd7a4dc1cd78bbfd51e634144ae8a2a58b8f3b3fad02890fd0dec: Status 404 returned error can't find the container with id 2d601fd6c36cd7a4dc1cd78bbfd51e634144ae8a2a58b8f3b3fad02890fd0dec Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.688061 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj96d\" (UniqueName: \"kubernetes.io/projected/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-kube-api-access-rj96d\") pod \"watcher-7759-account-create-update-mf5qn\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.688140 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vf7s6\" (UniqueName: \"kubernetes.io/projected/9127f5ea-7402-4155-b70f-2a4d382598ec-kube-api-access-vf7s6\") pod \"watcher-db-create-sv4j2\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.688237 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-operator-scripts\") pod \"watcher-7759-account-create-update-mf5qn\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.688313 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9127f5ea-7402-4155-b70f-2a4d382598ec-operator-scripts\") pod \"watcher-db-create-sv4j2\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.689785 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9127f5ea-7402-4155-b70f-2a4d382598ec-operator-scripts\") pod \"watcher-db-create-sv4j2\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: W0120 16:48:58.693638 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8eceda26_e546_4f1c_b611_18056b30d199.slice/crio-74a1dc943860e8fdcc9eb8fec1b9ee21603999e57573b6b040e64a066a2237b6 WatchSource:0}: Error finding container 74a1dc943860e8fdcc9eb8fec1b9ee21603999e57573b6b040e64a066a2237b6: Status 404 returned error can't find the container with id 74a1dc943860e8fdcc9eb8fec1b9ee21603999e57573b6b040e64a066a2237b6 Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 
16:48:58.693888 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-72pdn"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.706746 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vf7s6\" (UniqueName: \"kubernetes.io/projected/9127f5ea-7402-4155-b70f-2a4d382598ec-kube-api-access-vf7s6\") pod \"watcher-db-create-sv4j2\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.789945 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-operator-scripts\") pod \"watcher-7759-account-create-update-mf5qn\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.790067 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj96d\" (UniqueName: \"kubernetes.io/projected/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-kube-api-access-rj96d\") pod \"watcher-7759-account-create-update-mf5qn\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.790946 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-operator-scripts\") pod \"watcher-7759-account-create-update-mf5qn\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.804943 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-sv4j2" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.811557 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj96d\" (UniqueName: \"kubernetes.io/projected/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-kube-api-access-rj96d\") pod \"watcher-7759-account-create-update-mf5qn\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.841273 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.843515 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.903954 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-dvx7h"] Jan 20 16:48:58 crc kubenswrapper[4995]: I0120 16:48:58.904566 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerName="dnsmasq-dns" containerID="cri-o://3ef0f6e2aecd5826069e89b5ecdf35890338dd8c7605b369c7b20287bc57bd0a" gracePeriod=10 Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.278525 4995 generic.go:334] "Generic (PLEG): container finished" podID="8eceda26-e546-4f1c-b611-18056b30d199" containerID="38d764a363ff2a902a260a9025f3bd3a1e3cf7e0b118ce276110b78e179a79d1" exitCode=0 Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.278935 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-72pdn" event={"ID":"8eceda26-e546-4f1c-b611-18056b30d199","Type":"ContainerDied","Data":"38d764a363ff2a902a260a9025f3bd3a1e3cf7e0b118ce276110b78e179a79d1"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.279038 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-72pdn" event={"ID":"8eceda26-e546-4f1c-b611-18056b30d199","Type":"ContainerStarted","Data":"74a1dc943860e8fdcc9eb8fec1b9ee21603999e57573b6b040e64a066a2237b6"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.281438 4995 generic.go:334] "Generic (PLEG): container finished" podID="51b9bb44-7353-47df-995b-88a44aed4e12" containerID="f7661154b178ed9c11b44bc425dc43803dd061f2132875a69ac65a72784f2c2d" exitCode=0 Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.281557 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-195b-account-create-update-v2pzx" event={"ID":"51b9bb44-7353-47df-995b-88a44aed4e12","Type":"ContainerDied","Data":"f7661154b178ed9c11b44bc425dc43803dd061f2132875a69ac65a72784f2c2d"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.281576 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-195b-account-create-update-v2pzx" event={"ID":"51b9bb44-7353-47df-995b-88a44aed4e12","Type":"ContainerStarted","Data":"451b348021745d4842f45131ddcb29aabb548579e1ea8d6a8213b5f2cef42abd"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.284150 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerStarted","Data":"69e9ca3a3017426111a09066f96d2d3197f0522063b303387009e21064ac1654"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.289191 4995 generic.go:334] "Generic (PLEG): container finished" podID="30229159-0c2b-429a-997f-647d3398832f" containerID="7d241ca40d3bf15210984a72e9bdc49d6cbc76aab1e07ac11aa9bcb17e078fcb" exitCode=0 Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.289239 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-j56cw" event={"ID":"30229159-0c2b-429a-997f-647d3398832f","Type":"ContainerDied","Data":"7d241ca40d3bf15210984a72e9bdc49d6cbc76aab1e07ac11aa9bcb17e078fcb"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.289260 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-j56cw" 
event={"ID":"30229159-0c2b-429a-997f-647d3398832f","Type":"ContainerStarted","Data":"7096da38df10e4d47c61dc11ef60aea6d8b40268491cac1b548400cdf3d66781"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.299637 4995 generic.go:334] "Generic (PLEG): container finished" podID="8806da75-604e-462a-a582-bd0446c83f09" containerID="359af61b0dac4d84d0461fa96ea32ff311bc8f52a31b1f3804cc6658fb436d17" exitCode=0 Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.300135 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77d5-account-create-update-pws59" event={"ID":"8806da75-604e-462a-a582-bd0446c83f09","Type":"ContainerDied","Data":"359af61b0dac4d84d0461fa96ea32ff311bc8f52a31b1f3804cc6658fb436d17"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.300183 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77d5-account-create-update-pws59" event={"ID":"8806da75-604e-462a-a582-bd0446c83f09","Type":"ContainerStarted","Data":"2d601fd6c36cd7a4dc1cd78bbfd51e634144ae8a2a58b8f3b3fad02890fd0dec"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.303834 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nmjp6" event={"ID":"f955d94a-612b-4962-9745-ac012f2398b2","Type":"ContainerStarted","Data":"acc264f5fb9c46d4069f8033354bf163321deff41dba05b6ef1ec60f3cc35415"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.336461 4995 generic.go:334] "Generic (PLEG): container finished" podID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerID="3ef0f6e2aecd5826069e89b5ecdf35890338dd8c7605b369c7b20287bc57bd0a" exitCode=0 Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.336523 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" event={"ID":"471d34ec-7dff-4d2c-92c6-3319ac7db16e","Type":"ContainerDied","Data":"3ef0f6e2aecd5826069e89b5ecdf35890338dd8c7605b369c7b20287bc57bd0a"} Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.366139 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-sv4j2"] Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.397552 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-nmjp6" podStartSLOduration=2.2256898019999998 podStartE2EDuration="9.397534514s" podCreationTimestamp="2026-01-20 16:48:50 +0000 UTC" firstStartedPulling="2026-01-20 16:48:50.878575387 +0000 UTC m=+1049.123180193" lastFinishedPulling="2026-01-20 16:48:58.050420089 +0000 UTC m=+1056.295024905" observedRunningTime="2026-01-20 16:48:59.392525978 +0000 UTC m=+1057.637130774" watchObservedRunningTime="2026-01-20 16:48:59.397534514 +0000 UTC m=+1057.642139320" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.504230 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-7759-account-create-update-mf5qn"] Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.541540 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.708356 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-dns-svc\") pod \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.708744 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-config\") pod \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.708846 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcd2x\" (UniqueName: \"kubernetes.io/projected/471d34ec-7dff-4d2c-92c6-3319ac7db16e-kube-api-access-mcd2x\") pod \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.708879 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-nb\") pod \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.708911 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-sb\") pod \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\" (UID: \"471d34ec-7dff-4d2c-92c6-3319ac7db16e\") " Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.728429 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/471d34ec-7dff-4d2c-92c6-3319ac7db16e-kube-api-access-mcd2x" (OuterVolumeSpecName: "kube-api-access-mcd2x") pod "471d34ec-7dff-4d2c-92c6-3319ac7db16e" (UID: "471d34ec-7dff-4d2c-92c6-3319ac7db16e"). InnerVolumeSpecName "kube-api-access-mcd2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.811538 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcd2x\" (UniqueName: \"kubernetes.io/projected/471d34ec-7dff-4d2c-92c6-3319ac7db16e-kube-api-access-mcd2x\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.886345 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "471d34ec-7dff-4d2c-92c6-3319ac7db16e" (UID: "471d34ec-7dff-4d2c-92c6-3319ac7db16e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.913602 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.957180 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "471d34ec-7dff-4d2c-92c6-3319ac7db16e" (UID: "471d34ec-7dff-4d2c-92c6-3319ac7db16e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.966994 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-config" (OuterVolumeSpecName: "config") pod "471d34ec-7dff-4d2c-92c6-3319ac7db16e" (UID: "471d34ec-7dff-4d2c-92c6-3319ac7db16e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:48:59 crc kubenswrapper[4995]: I0120 16:48:59.988033 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "471d34ec-7dff-4d2c-92c6-3319ac7db16e" (UID: "471d34ec-7dff-4d2c-92c6-3319ac7db16e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.021286 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.021351 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.021379 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/471d34ec-7dff-4d2c-92c6-3319ac7db16e-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.345576 4995 generic.go:334] "Generic (PLEG): container finished" podID="9127f5ea-7402-4155-b70f-2a4d382598ec" containerID="3a4aba26cb1810f15c420f4f9a2e770d9c9c0522ab039ca5a011f2fb01ccf9c5" exitCode=0 Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.345660 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-sv4j2" event={"ID":"9127f5ea-7402-4155-b70f-2a4d382598ec","Type":"ContainerDied","Data":"3a4aba26cb1810f15c420f4f9a2e770d9c9c0522ab039ca5a011f2fb01ccf9c5"} Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.345706 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-sv4j2" event={"ID":"9127f5ea-7402-4155-b70f-2a4d382598ec","Type":"ContainerStarted","Data":"507a43767984f4355911ea8ecedd127a7db94569d3a1f7ca428385ff0e1e27db"} Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.347217 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" 
event={"ID":"471d34ec-7dff-4d2c-92c6-3319ac7db16e","Type":"ContainerDied","Data":"9ffa2b6e8649787566c5a76f2bd0c2d54624972255e689030145332e519685ac"} Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.347275 4995 scope.go:117] "RemoveContainer" containerID="3ef0f6e2aecd5826069e89b5ecdf35890338dd8c7605b369c7b20287bc57bd0a" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.347299 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-dvx7h" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.348576 4995 generic.go:334] "Generic (PLEG): container finished" podID="c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" containerID="fc53de352d119f2763437fd1e330416bdd9e71b2df28d57fb606d2f1091ee0dd" exitCode=0 Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.348744 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-7759-account-create-update-mf5qn" event={"ID":"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305","Type":"ContainerDied","Data":"fc53de352d119f2763437fd1e330416bdd9e71b2df28d57fb606d2f1091ee0dd"} Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.348770 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-7759-account-create-update-mf5qn" event={"ID":"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305","Type":"ContainerStarted","Data":"5381eb952be0442176b2074d4d5829ea0e2a58b7a800e272583947d16cfe2461"} Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.371539 4995 scope.go:117] "RemoveContainer" containerID="ab4295ff69aa5bc6204ffd09a2cc1d1efb33782fbf1061e6a5b406cf880cdff6" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.381594 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-dvx7h"] Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.393311 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-dvx7h"] Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.754784 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:49:00 crc kubenswrapper[4995]: I0120 16:49:00.916784 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.369825 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8806da75-604e-462a-a582-bd0446c83f09-operator-scripts\") pod \"8806da75-604e-462a-a582-bd0446c83f09\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.369941 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwcd6\" (UniqueName: \"kubernetes.io/projected/8806da75-604e-462a-a582-bd0446c83f09-kube-api-access-fwcd6\") pod \"8806da75-604e-462a-a582-bd0446c83f09\" (UID: \"8806da75-604e-462a-a582-bd0446c83f09\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.375706 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8806da75-604e-462a-a582-bd0446c83f09-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8806da75-604e-462a-a582-bd0446c83f09" (UID: "8806da75-604e-462a-a582-bd0446c83f09"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.380650 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerStarted","Data":"57b2aae7cf41412fd19a4b06859ac07deff3ff9f6aa6a0b4bff8e0918cb8137d"} Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.383953 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-77d5-account-create-update-pws59" event={"ID":"8806da75-604e-462a-a582-bd0446c83f09","Type":"ContainerDied","Data":"2d601fd6c36cd7a4dc1cd78bbfd51e634144ae8a2a58b8f3b3fad02890fd0dec"} Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.383995 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d601fd6c36cd7a4dc1cd78bbfd51e634144ae8a2a58b8f3b3fad02890fd0dec" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.384015 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-77d5-account-create-update-pws59" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.389902 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8806da75-604e-462a-a582-bd0446c83f09-kube-api-access-fwcd6" (OuterVolumeSpecName: "kube-api-access-fwcd6") pod "8806da75-604e-462a-a582-bd0446c83f09" (UID: "8806da75-604e-462a-a582-bd0446c83f09"). InnerVolumeSpecName "kube-api-access-fwcd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.390535 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-72pdn" event={"ID":"8eceda26-e546-4f1c-b611-18056b30d199","Type":"ContainerDied","Data":"74a1dc943860e8fdcc9eb8fec1b9ee21603999e57573b6b040e64a066a2237b6"} Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.390570 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74a1dc943860e8fdcc9eb8fec1b9ee21603999e57573b6b040e64a066a2237b6" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.395941 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-195b-account-create-update-v2pzx" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.396314 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-195b-account-create-update-v2pzx" event={"ID":"51b9bb44-7353-47df-995b-88a44aed4e12","Type":"ContainerDied","Data":"451b348021745d4842f45131ddcb29aabb548579e1ea8d6a8213b5f2cef42abd"} Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.396346 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="451b348021745d4842f45131ddcb29aabb548579e1ea8d6a8213b5f2cef42abd" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.473827 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b9bb44-7353-47df-995b-88a44aed4e12-operator-scripts\") pod \"51b9bb44-7353-47df-995b-88a44aed4e12\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.474297 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d22gb\" (UniqueName: \"kubernetes.io/projected/51b9bb44-7353-47df-995b-88a44aed4e12-kube-api-access-d22gb\") pod \"51b9bb44-7353-47df-995b-88a44aed4e12\" (UID: \"51b9bb44-7353-47df-995b-88a44aed4e12\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.475691 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8806da75-604e-462a-a582-bd0446c83f09-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.475793 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwcd6\" (UniqueName: \"kubernetes.io/projected/8806da75-604e-462a-a582-bd0446c83f09-kube-api-access-fwcd6\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.476463 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51b9bb44-7353-47df-995b-88a44aed4e12-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "51b9bb44-7353-47df-995b-88a44aed4e12" (UID: "51b9bb44-7353-47df-995b-88a44aed4e12"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491148 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-5sbj9"] Jan 20 16:49:01 crc kubenswrapper[4995]: E0120 16:49:01.491616 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerName="dnsmasq-dns" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491632 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerName="dnsmasq-dns" Jan 20 16:49:01 crc kubenswrapper[4995]: E0120 16:49:01.491653 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8806da75-604e-462a-a582-bd0446c83f09" containerName="mariadb-account-create-update" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491664 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8806da75-604e-462a-a582-bd0446c83f09" containerName="mariadb-account-create-update" Jan 20 16:49:01 crc kubenswrapper[4995]: E0120 16:49:01.491674 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b9bb44-7353-47df-995b-88a44aed4e12" containerName="mariadb-account-create-update" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491683 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b9bb44-7353-47df-995b-88a44aed4e12" containerName="mariadb-account-create-update" Jan 20 16:49:01 crc kubenswrapper[4995]: E0120 16:49:01.491702 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerName="init" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491709 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerName="init" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491913 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8806da75-604e-462a-a582-bd0446c83f09" containerName="mariadb-account-create-update" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491942 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" containerName="dnsmasq-dns" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.491957 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="51b9bb44-7353-47df-995b-88a44aed4e12" containerName="mariadb-account-create-update" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.492603 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.500692 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-c670-account-create-update-nkw7r"] Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.501742 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.503739 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.510346 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5sbj9"] Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.517243 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c670-account-create-update-nkw7r"] Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.517513 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51b9bb44-7353-47df-995b-88a44aed4e12-kube-api-access-d22gb" (OuterVolumeSpecName: "kube-api-access-d22gb") pod "51b9bb44-7353-47df-995b-88a44aed4e12" (UID: "51b9bb44-7353-47df-995b-88a44aed4e12"). InnerVolumeSpecName "kube-api-access-d22gb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.559116 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-72pdn" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.570469 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-j56cw" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.577885 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr9jz\" (UniqueName: \"kubernetes.io/projected/08e0de87-a951-4bae-9915-9bbc5bf7ece5-kube-api-access-cr9jz\") pod \"glance-db-create-5sbj9\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.578047 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e0de87-a951-4bae-9915-9bbc5bf7ece5-operator-scripts\") pod \"glance-db-create-5sbj9\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.578305 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51b9bb44-7353-47df-995b-88a44aed4e12-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.578319 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d22gb\" (UniqueName: \"kubernetes.io/projected/51b9bb44-7353-47df-995b-88a44aed4e12-kube-api-access-d22gb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.679580 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzrz5\" (UniqueName: \"kubernetes.io/projected/30229159-0c2b-429a-997f-647d3398832f-kube-api-access-lzrz5\") pod \"30229159-0c2b-429a-997f-647d3398832f\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.679635 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eceda26-e546-4f1c-b611-18056b30d199-operator-scripts\") pod \"8eceda26-e546-4f1c-b611-18056b30d199\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 
16:49:01.679668 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5njdl\" (UniqueName: \"kubernetes.io/projected/8eceda26-e546-4f1c-b611-18056b30d199-kube-api-access-5njdl\") pod \"8eceda26-e546-4f1c-b611-18056b30d199\" (UID: \"8eceda26-e546-4f1c-b611-18056b30d199\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.679741 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30229159-0c2b-429a-997f-647d3398832f-operator-scripts\") pod \"30229159-0c2b-429a-997f-647d3398832f\" (UID: \"30229159-0c2b-429a-997f-647d3398832f\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.680061 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8eceda26-e546-4f1c-b611-18056b30d199-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8eceda26-e546-4f1c-b611-18056b30d199" (UID: "8eceda26-e546-4f1c-b611-18056b30d199"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.680114 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30229159-0c2b-429a-997f-647d3398832f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "30229159-0c2b-429a-997f-647d3398832f" (UID: "30229159-0c2b-429a-997f-647d3398832f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.682598 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr9jz\" (UniqueName: \"kubernetes.io/projected/08e0de87-a951-4bae-9915-9bbc5bf7ece5-kube-api-access-cr9jz\") pod \"glance-db-create-5sbj9\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.682675 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scls5\" (UniqueName: \"kubernetes.io/projected/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-kube-api-access-scls5\") pod \"glance-c670-account-create-update-nkw7r\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.682793 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e0de87-a951-4bae-9915-9bbc5bf7ece5-operator-scripts\") pod \"glance-db-create-5sbj9\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.682865 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-operator-scripts\") pod \"glance-c670-account-create-update-nkw7r\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.683445 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eceda26-e546-4f1c-b611-18056b30d199-kube-api-access-5njdl" (OuterVolumeSpecName: "kube-api-access-5njdl") pod "8eceda26-e546-4f1c-b611-18056b30d199" (UID: 
"8eceda26-e546-4f1c-b611-18056b30d199"). InnerVolumeSpecName "kube-api-access-5njdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.683734 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30229159-0c2b-429a-997f-647d3398832f-kube-api-access-lzrz5" (OuterVolumeSpecName: "kube-api-access-lzrz5") pod "30229159-0c2b-429a-997f-647d3398832f" (UID: "30229159-0c2b-429a-997f-647d3398832f"). InnerVolumeSpecName "kube-api-access-lzrz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.684362 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e0de87-a951-4bae-9915-9bbc5bf7ece5-operator-scripts\") pod \"glance-db-create-5sbj9\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.685614 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eceda26-e546-4f1c-b611-18056b30d199-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.685657 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30229159-0c2b-429a-997f-647d3398832f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.707271 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr9jz\" (UniqueName: \"kubernetes.io/projected/08e0de87-a951-4bae-9915-9bbc5bf7ece5-kube-api-access-cr9jz\") pod \"glance-db-create-5sbj9\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.787606 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scls5\" (UniqueName: \"kubernetes.io/projected/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-kube-api-access-scls5\") pod \"glance-c670-account-create-update-nkw7r\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.788510 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-operator-scripts\") pod \"glance-c670-account-create-update-nkw7r\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.788609 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzrz5\" (UniqueName: \"kubernetes.io/projected/30229159-0c2b-429a-997f-647d3398832f-kube-api-access-lzrz5\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.788622 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5njdl\" (UniqueName: \"kubernetes.io/projected/8eceda26-e546-4f1c-b611-18056b30d199-kube-api-access-5njdl\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.789628 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-operator-scripts\") pod \"glance-c670-account-create-update-nkw7r\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.798765 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-sv4j2" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.805467 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scls5\" (UniqueName: \"kubernetes.io/projected/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-kube-api-access-scls5\") pod \"glance-c670-account-create-update-nkw7r\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.834515 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.882872 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.890021 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-operator-scripts\") pod \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.890176 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9127f5ea-7402-4155-b70f-2a4d382598ec-operator-scripts\") pod \"9127f5ea-7402-4155-b70f-2a4d382598ec\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.890216 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj96d\" (UniqueName: \"kubernetes.io/projected/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-kube-api-access-rj96d\") pod \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\" (UID: \"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.890388 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vf7s6\" (UniqueName: \"kubernetes.io/projected/9127f5ea-7402-4155-b70f-2a4d382598ec-kube-api-access-vf7s6\") pod \"9127f5ea-7402-4155-b70f-2a4d382598ec\" (UID: \"9127f5ea-7402-4155-b70f-2a4d382598ec\") " Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.891680 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9127f5ea-7402-4155-b70f-2a4d382598ec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9127f5ea-7402-4155-b70f-2a4d382598ec" (UID: "9127f5ea-7402-4155-b70f-2a4d382598ec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.891726 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" (UID: "c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.893882 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-kube-api-access-rj96d" (OuterVolumeSpecName: "kube-api-access-rj96d") pod "c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" (UID: "c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305"). InnerVolumeSpecName "kube-api-access-rj96d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.894105 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.894455 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9127f5ea-7402-4155-b70f-2a4d382598ec-kube-api-access-vf7s6" (OuterVolumeSpecName: "kube-api-access-vf7s6") pod "9127f5ea-7402-4155-b70f-2a4d382598ec" (UID: "9127f5ea-7402-4155-b70f-2a4d382598ec"). InnerVolumeSpecName "kube-api-access-vf7s6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.993942 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9127f5ea-7402-4155-b70f-2a4d382598ec-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.994212 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rj96d\" (UniqueName: \"kubernetes.io/projected/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-kube-api-access-rj96d\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.994224 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vf7s6\" (UniqueName: \"kubernetes.io/projected/9127f5ea-7402-4155-b70f-2a4d382598ec-kube-api-access-vf7s6\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:01 crc kubenswrapper[4995]: I0120 16:49:01.994233 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.021742 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="471d34ec-7dff-4d2c-92c6-3319ac7db16e" path="/var/lib/kubelet/pods/471d34ec-7dff-4d2c-92c6-3319ac7db16e/volumes" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.346630 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-5sbj9"] Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.406003 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-j56cw" event={"ID":"30229159-0c2b-429a-997f-647d3398832f","Type":"ContainerDied","Data":"7096da38df10e4d47c61dc11ef60aea6d8b40268491cac1b548400cdf3d66781"} Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.406050 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7096da38df10e4d47c61dc11ef60aea6d8b40268491cac1b548400cdf3d66781" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.406170 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-j56cw" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.408455 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-sv4j2" event={"ID":"9127f5ea-7402-4155-b70f-2a4d382598ec","Type":"ContainerDied","Data":"507a43767984f4355911ea8ecedd127a7db94569d3a1f7ca428385ff0e1e27db"} Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.408480 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="507a43767984f4355911ea8ecedd127a7db94569d3a1f7ca428385ff0e1e27db" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.408527 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-sv4j2" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.410588 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5sbj9" event={"ID":"08e0de87-a951-4bae-9915-9bbc5bf7ece5","Type":"ContainerStarted","Data":"c961ab4ecb134502646359cc4960167a3e50a3c05b12f7cae65a73cd467ad9ba"} Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.412422 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-72pdn" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.412780 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-7759-account-create-update-mf5qn" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.413070 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-7759-account-create-update-mf5qn" event={"ID":"c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305","Type":"ContainerDied","Data":"5381eb952be0442176b2074d4d5829ea0e2a58b7a800e272583947d16cfe2461"} Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.413114 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5381eb952be0442176b2074d4d5829ea0e2a58b7a800e272583947d16cfe2461" Jan 20 16:49:02 crc kubenswrapper[4995]: I0120 16:49:02.421863 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c670-account-create-update-nkw7r"] Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.249656 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-wtpkz"] Jan 20 16:49:03 crc kubenswrapper[4995]: E0120 16:49:03.250342 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9127f5ea-7402-4155-b70f-2a4d382598ec" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250373 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="9127f5ea-7402-4155-b70f-2a4d382598ec" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: E0120 16:49:03.250392 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" containerName="mariadb-account-create-update" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250399 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" containerName="mariadb-account-create-update" Jan 20 16:49:03 crc kubenswrapper[4995]: E0120 16:49:03.250419 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30229159-0c2b-429a-997f-647d3398832f" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250426 4995 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="30229159-0c2b-429a-997f-647d3398832f" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: E0120 16:49:03.250436 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eceda26-e546-4f1c-b611-18056b30d199" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250442 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eceda26-e546-4f1c-b611-18056b30d199" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250599 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eceda26-e546-4f1c-b611-18056b30d199" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250618 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="30229159-0c2b-429a-997f-647d3398832f" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250628 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" containerName="mariadb-account-create-update" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.250637 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="9127f5ea-7402-4155-b70f-2a4d382598ec" containerName="mariadb-database-create" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.251305 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-wtpkz" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.254295 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.256836 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-wtpkz"] Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.316790 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c73eac42-022c-4605-8bd1-62e6d420a752-operator-scripts\") pod \"root-account-create-update-wtpkz\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " pod="openstack/root-account-create-update-wtpkz" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.316853 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86pr8\" (UniqueName: \"kubernetes.io/projected/c73eac42-022c-4605-8bd1-62e6d420a752-kube-api-access-86pr8\") pod \"root-account-create-update-wtpkz\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " pod="openstack/root-account-create-update-wtpkz" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.418376 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c73eac42-022c-4605-8bd1-62e6d420a752-operator-scripts\") pod \"root-account-create-update-wtpkz\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " pod="openstack/root-account-create-update-wtpkz" Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.418463 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86pr8\" (UniqueName: \"kubernetes.io/projected/c73eac42-022c-4605-8bd1-62e6d420a752-kube-api-access-86pr8\") pod \"root-account-create-update-wtpkz\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " pod="openstack/root-account-create-update-wtpkz" Jan 20 
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.419151 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c73eac42-022c-4605-8bd1-62e6d420a752-operator-scripts\") pod \"root-account-create-update-wtpkz\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " pod="openstack/root-account-create-update-wtpkz"
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.421551 4995 generic.go:334] "Generic (PLEG): container finished" podID="d5be9b31-4928-4381-b9df-6dbbbb20ca0c" containerID="544504c1fcb266975a10bf1fdcc8d5031f2f78a3bbeb6e0ee2a8d2d603ed8d6c" exitCode=0
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.421626 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c670-account-create-update-nkw7r" event={"ID":"d5be9b31-4928-4381-b9df-6dbbbb20ca0c","Type":"ContainerDied","Data":"544504c1fcb266975a10bf1fdcc8d5031f2f78a3bbeb6e0ee2a8d2d603ed8d6c"}
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.421657 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c670-account-create-update-nkw7r" event={"ID":"d5be9b31-4928-4381-b9df-6dbbbb20ca0c","Type":"ContainerStarted","Data":"a23f5aeec2f2ec14c3a0fd6af30b3af0644a7e56f9eea872e6cb01ca4b795273"}
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.424365 4995 generic.go:334] "Generic (PLEG): container finished" podID="08e0de87-a951-4bae-9915-9bbc5bf7ece5" containerID="c9134720c2b281de70679f3892f95322c74a07648ce04c54c86d962bb59e5c2c" exitCode=0
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.424401 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5sbj9" event={"ID":"08e0de87-a951-4bae-9915-9bbc5bf7ece5","Type":"ContainerDied","Data":"c9134720c2b281de70679f3892f95322c74a07648ce04c54c86d962bb59e5c2c"}
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.441020 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86pr8\" (UniqueName: \"kubernetes.io/projected/c73eac42-022c-4605-8bd1-62e6d420a752-kube-api-access-86pr8\") pod \"root-account-create-update-wtpkz\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " pod="openstack/root-account-create-update-wtpkz"
Jan 20 16:49:03 crc kubenswrapper[4995]: I0120 16:49:03.573410 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-wtpkz"
Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.056382 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c670-account-create-update-nkw7r"
Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.064973 4995 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.153993 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-operator-scripts\") pod \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.154057 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scls5\" (UniqueName: \"kubernetes.io/projected/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-kube-api-access-scls5\") pod \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\" (UID: \"d5be9b31-4928-4381-b9df-6dbbbb20ca0c\") " Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.154235 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e0de87-a951-4bae-9915-9bbc5bf7ece5-operator-scripts\") pod \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.154304 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cr9jz\" (UniqueName: \"kubernetes.io/projected/08e0de87-a951-4bae-9915-9bbc5bf7ece5-kube-api-access-cr9jz\") pod \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\" (UID: \"08e0de87-a951-4bae-9915-9bbc5bf7ece5\") " Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.154790 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d5be9b31-4928-4381-b9df-6dbbbb20ca0c" (UID: "d5be9b31-4928-4381-b9df-6dbbbb20ca0c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.155108 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e0de87-a951-4bae-9915-9bbc5bf7ece5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "08e0de87-a951-4bae-9915-9bbc5bf7ece5" (UID: "08e0de87-a951-4bae-9915-9bbc5bf7ece5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.161362 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08e0de87-a951-4bae-9915-9bbc5bf7ece5-kube-api-access-cr9jz" (OuterVolumeSpecName: "kube-api-access-cr9jz") pod "08e0de87-a951-4bae-9915-9bbc5bf7ece5" (UID: "08e0de87-a951-4bae-9915-9bbc5bf7ece5"). InnerVolumeSpecName "kube-api-access-cr9jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.178676 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-kube-api-access-scls5" (OuterVolumeSpecName: "kube-api-access-scls5") pod "d5be9b31-4928-4381-b9df-6dbbbb20ca0c" (UID: "d5be9b31-4928-4381-b9df-6dbbbb20ca0c"). InnerVolumeSpecName "kube-api-access-scls5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.256683 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e0de87-a951-4bae-9915-9bbc5bf7ece5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.256710 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cr9jz\" (UniqueName: \"kubernetes.io/projected/08e0de87-a951-4bae-9915-9bbc5bf7ece5-kube-api-access-cr9jz\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.256722 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.256732 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scls5\" (UniqueName: \"kubernetes.io/projected/d5be9b31-4928-4381-b9df-6dbbbb20ca0c-kube-api-access-scls5\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.380430 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-wtpkz"] Jan 20 16:49:05 crc kubenswrapper[4995]: W0120 16:49:05.382387 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc73eac42_022c_4605_8bd1_62e6d420a752.slice/crio-4d872155830707a30dfc2432bda79717906cf24fc0e70ea13d36d7d0d14d4c3b WatchSource:0}: Error finding container 4d872155830707a30dfc2432bda79717906cf24fc0e70ea13d36d7d0d14d4c3b: Status 404 returned error can't find the container with id 4d872155830707a30dfc2432bda79717906cf24fc0e70ea13d36d7d0d14d4c3b Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.439739 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-5sbj9" event={"ID":"08e0de87-a951-4bae-9915-9bbc5bf7ece5","Type":"ContainerDied","Data":"c961ab4ecb134502646359cc4960167a3e50a3c05b12f7cae65a73cd467ad9ba"} Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.439760 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-5sbj9" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.440121 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c961ab4ecb134502646359cc4960167a3e50a3c05b12f7cae65a73cd467ad9ba" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.441929 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerStarted","Data":"675b42b9706a61119c8c1aa2e9fa1162313829e6a95281026daecaa77a72938a"} Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.444558 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c670-account-create-update-nkw7r" event={"ID":"d5be9b31-4928-4381-b9df-6dbbbb20ca0c","Type":"ContainerDied","Data":"a23f5aeec2f2ec14c3a0fd6af30b3af0644a7e56f9eea872e6cb01ca4b795273"} Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.444612 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a23f5aeec2f2ec14c3a0fd6af30b3af0644a7e56f9eea872e6cb01ca4b795273" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.444693 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c670-account-create-update-nkw7r" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.450906 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-wtpkz" event={"ID":"c73eac42-022c-4605-8bd1-62e6d420a752","Type":"ContainerStarted","Data":"4d872155830707a30dfc2432bda79717906cf24fc0e70ea13d36d7d0d14d4c3b"} Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.475878 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=7.941405928 podStartE2EDuration="47.475863082s" podCreationTimestamp="2026-01-20 16:48:18 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.691511907 +0000 UTC m=+1023.936116713" lastFinishedPulling="2026-01-20 16:49:05.225969061 +0000 UTC m=+1063.470573867" observedRunningTime="2026-01-20 16:49:05.472536422 +0000 UTC m=+1063.717141238" watchObservedRunningTime="2026-01-20 16:49:05.475863082 +0000 UTC m=+1063.720467888" Jan 20 16:49:05 crc kubenswrapper[4995]: I0120 16:49:05.561306 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:49:05 crc kubenswrapper[4995]: E0120 16:49:05.561506 4995 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 16:49:05 crc kubenswrapper[4995]: E0120 16:49:05.561536 4995 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 16:49:05 crc kubenswrapper[4995]: E0120 16:49:05.561587 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift podName:3f11d1ef-8720-4a15-91b7-2ad1602194f7 nodeName:}" failed. No retries permitted until 2026-01-20 16:49:21.561569775 +0000 UTC m=+1079.806174581 (durationBeforeRetry 16s). 
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.461658 4995 generic.go:334] "Generic (PLEG): container finished" podID="c73eac42-022c-4605-8bd1-62e6d420a752" containerID="53e8069b443f272a81a3258a117d49234e434225f6460e754babbea2299b9eca" exitCode=0
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.461742 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-wtpkz" event={"ID":"c73eac42-022c-4605-8bd1-62e6d420a752","Type":"ContainerDied","Data":"53e8069b443f272a81a3258a117d49234e434225f6460e754babbea2299b9eca"}
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.464551 4995 generic.go:334] "Generic (PLEG): container finished" podID="f955d94a-612b-4962-9745-ac012f2398b2" containerID="acc264f5fb9c46d4069f8033354bf163321deff41dba05b6ef1ec60f3cc35415" exitCode=0
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.464636 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nmjp6" event={"ID":"f955d94a-612b-4962-9745-ac012f2398b2","Type":"ContainerDied","Data":"acc264f5fb9c46d4069f8033354bf163321deff41dba05b6ef1ec60f3cc35415"}
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.571871 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-spc7x" podUID="54be3683-2d75-43fd-8301-e05b2a5103cc" containerName="ovn-controller" probeResult="failure" output=<
Jan 20 16:49:06 crc kubenswrapper[4995]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 20 16:49:06 crc kubenswrapper[4995]: >
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.690999 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-jdpl9"]
Jan 20 16:49:06 crc kubenswrapper[4995]: E0120 16:49:06.691363 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5be9b31-4928-4381-b9df-6dbbbb20ca0c" containerName="mariadb-account-create-update"
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.691383 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5be9b31-4928-4381-b9df-6dbbbb20ca0c" containerName="mariadb-account-create-update"
Jan 20 16:49:06 crc kubenswrapper[4995]: E0120 16:49:06.691400 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e0de87-a951-4bae-9915-9bbc5bf7ece5" containerName="mariadb-database-create"
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.691409 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e0de87-a951-4bae-9915-9bbc5bf7ece5" containerName="mariadb-database-create"
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.691568 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5be9b31-4928-4381-b9df-6dbbbb20ca0c" containerName="mariadb-account-create-update"
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.691581 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e0de87-a951-4bae-9915-9bbc5bf7ece5" containerName="mariadb-database-create"
Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.692131 4995 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.694430 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5lbqx" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.699651 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.701339 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jdpl9"] Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.783024 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-db-sync-config-data\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.783065 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-combined-ca-bundle\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.783167 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-config-data\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.783220 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzxjw\" (UniqueName: \"kubernetes.io/projected/6ecfbb2f-250c-4484-a20f-f45dce557abc-kube-api-access-dzxjw\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.884160 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-config-data\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.884224 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzxjw\" (UniqueName: \"kubernetes.io/projected/6ecfbb2f-250c-4484-a20f-f45dce557abc-kube-api-access-dzxjw\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.884287 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-db-sync-config-data\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.884308 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-combined-ca-bundle\") pod 
\"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.891874 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-db-sync-config-data\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.892126 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-combined-ca-bundle\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.892313 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-config-data\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:06 crc kubenswrapper[4995]: I0120 16:49:06.906382 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzxjw\" (UniqueName: \"kubernetes.io/projected/6ecfbb2f-250c-4484-a20f-f45dce557abc-kube-api-access-dzxjw\") pod \"glance-db-sync-jdpl9\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.018232 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.644040 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jdpl9"] Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.869962 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.880749 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-wtpkz" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901567 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86pr8\" (UniqueName: \"kubernetes.io/projected/c73eac42-022c-4605-8bd1-62e6d420a752-kube-api-access-86pr8\") pod \"c73eac42-022c-4605-8bd1-62e6d420a752\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901632 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-combined-ca-bundle\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901668 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-ring-data-devices\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901694 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-dispersionconf\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901738 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f955d94a-612b-4962-9745-ac012f2398b2-etc-swift\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901762 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-scripts\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901825 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-swiftconf\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901870 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zklg9\" (UniqueName: \"kubernetes.io/projected/f955d94a-612b-4962-9745-ac012f2398b2-kube-api-access-zklg9\") pod \"f955d94a-612b-4962-9745-ac012f2398b2\" (UID: \"f955d94a-612b-4962-9745-ac012f2398b2\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.901891 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c73eac42-022c-4605-8bd1-62e6d420a752-operator-scripts\") pod \"c73eac42-022c-4605-8bd1-62e6d420a752\" (UID: \"c73eac42-022c-4605-8bd1-62e6d420a752\") " Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.902883 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c73eac42-022c-4605-8bd1-62e6d420a752-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod 
"c73eac42-022c-4605-8bd1-62e6d420a752" (UID: "c73eac42-022c-4605-8bd1-62e6d420a752"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.903232 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f955d94a-612b-4962-9745-ac012f2398b2-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.903560 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.907297 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c73eac42-022c-4605-8bd1-62e6d420a752-kube-api-access-86pr8" (OuterVolumeSpecName: "kube-api-access-86pr8") pod "c73eac42-022c-4605-8bd1-62e6d420a752" (UID: "c73eac42-022c-4605-8bd1-62e6d420a752"). InnerVolumeSpecName "kube-api-access-86pr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.910278 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f955d94a-612b-4962-9745-ac012f2398b2-kube-api-access-zklg9" (OuterVolumeSpecName: "kube-api-access-zklg9") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "kube-api-access-zklg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.914642 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.923680 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-scripts" (OuterVolumeSpecName: "scripts") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.930797 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:07 crc kubenswrapper[4995]: I0120 16:49:07.939638 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f955d94a-612b-4962-9745-ac012f2398b2" (UID: "f955d94a-612b-4962-9745-ac012f2398b2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.005599 4995 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/f955d94a-612b-4962-9745-ac012f2398b2-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.005932 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006312 4995 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006365 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zklg9\" (UniqueName: \"kubernetes.io/projected/f955d94a-612b-4962-9745-ac012f2398b2-kube-api-access-zklg9\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006388 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c73eac42-022c-4605-8bd1-62e6d420a752-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006407 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86pr8\" (UniqueName: \"kubernetes.io/projected/c73eac42-022c-4605-8bd1-62e6d420a752-kube-api-access-86pr8\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006427 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006445 4995 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/f955d94a-612b-4962-9745-ac012f2398b2-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.006462 4995 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/f955d94a-612b-4962-9745-ac012f2398b2-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.482672 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-wtpkz" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.482685 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-wtpkz" event={"ID":"c73eac42-022c-4605-8bd1-62e6d420a752","Type":"ContainerDied","Data":"4d872155830707a30dfc2432bda79717906cf24fc0e70ea13d36d7d0d14d4c3b"} Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.482736 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d872155830707a30dfc2432bda79717906cf24fc0e70ea13d36d7d0d14d4c3b" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.484107 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jdpl9" event={"ID":"6ecfbb2f-250c-4484-a20f-f45dce557abc","Type":"ContainerStarted","Data":"4bfced606611adb84da47d5315efde8902a485e307cd314bd8b1c6e4b42835a0"} Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.486709 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-nmjp6" event={"ID":"f955d94a-612b-4962-9745-ac012f2398b2","Type":"ContainerDied","Data":"92885a3cf2aa61b9e4286d84d0ef9624229b0da92a62d85f76cbfcd7b20eee2b"} Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.486731 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92885a3cf2aa61b9e4286d84d0ef9624229b0da92a62d85f76cbfcd7b20eee2b" Jan 20 16:49:08 crc kubenswrapper[4995]: I0120 16:49:08.486799 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-nmjp6" Jan 20 16:49:09 crc kubenswrapper[4995]: I0120 16:49:09.617959 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-wtpkz"] Jan 20 16:49:09 crc kubenswrapper[4995]: I0120 16:49:09.625251 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-wtpkz"] Jan 20 16:49:09 crc kubenswrapper[4995]: I0120 16:49:09.725089 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:09 crc kubenswrapper[4995]: I0120 16:49:09.999303 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c73eac42-022c-4605-8bd1-62e6d420a752" path="/var/lib/kubelet/pods/c73eac42-022c-4605-8bd1-62e6d420a752/volumes" Jan 20 16:49:10 crc kubenswrapper[4995]: I0120 16:49:10.505006 4995 generic.go:334] "Generic (PLEG): container finished" podID="79c459b9-ccad-49a5-b945-64903e2c5308" containerID="f9924f8ceedb006a1a3a2d00d1ed358cfb77191ca6fd9c24966a0a177abff2a8" exitCode=0 Jan 20 16:49:10 crc kubenswrapper[4995]: I0120 16:49:10.505153 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"79c459b9-ccad-49a5-b945-64903e2c5308","Type":"ContainerDied","Data":"f9924f8ceedb006a1a3a2d00d1ed358cfb77191ca6fd9c24966a0a177abff2a8"} Jan 20 16:49:10 crc kubenswrapper[4995]: I0120 16:49:10.512300 4995 generic.go:334] "Generic (PLEG): container finished" podID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerID="e43764df4b82f7c03cd027fee5d4c77391d8c5774ce51d750a84dc6225286250" exitCode=0 Jan 20 16:49:10 crc kubenswrapper[4995]: I0120 16:49:10.512342 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4055d0be-e174-4fb9-9026-1a0499fe9dc6","Type":"ContainerDied","Data":"e43764df4b82f7c03cd027fee5d4c77391d8c5774ce51d750a84dc6225286250"} Jan 20 16:49:11 crc kubenswrapper[4995]: 
I0120 16:49:11.522428 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"79c459b9-ccad-49a5-b945-64903e2c5308","Type":"ContainerStarted","Data":"a2b32144a30f90a91a08d21858dce811acfa706d9048a95a9b86d7591102fea1"}
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.523614 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.542554 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4055d0be-e174-4fb9-9026-1a0499fe9dc6","Type":"ContainerStarted","Data":"e3deb01ca2be4cb8c084ba433fa4731ea41a459683255dc3063bea01ec264540"}
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.543450 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.543550 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-spc7x" podUID="54be3683-2d75-43fd-8301-e05b2a5103cc" containerName="ovn-controller" probeResult="failure" output=<
Jan 20 16:49:11 crc kubenswrapper[4995]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 20 16:49:11 crc kubenswrapper[4995]: >
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.545913 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=50.793493693 podStartE2EDuration="59.545897136s" podCreationTimestamp="2026-01-20 16:48:12 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.445372493 +0000 UTC m=+1023.689977299" lastFinishedPulling="2026-01-20 16:48:34.197775936 +0000 UTC m=+1032.442380742" observedRunningTime="2026-01-20 16:49:11.543902112 +0000 UTC m=+1069.788506948" watchObservedRunningTime="2026-01-20 16:49:11.545897136 +0000 UTC m=+1069.790501932"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.590395 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-q9nkf"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.592755 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=52.489673852 podStartE2EDuration="1m0.592746796s" podCreationTimestamp="2026-01-20 16:48:11 +0000 UTC" firstStartedPulling="2026-01-20 16:48:25.459660731 +0000 UTC m=+1023.704265537" lastFinishedPulling="2026-01-20 16:48:33.562733675 +0000 UTC m=+1031.807338481" observedRunningTime="2026-01-20 16:49:11.59143866 +0000 UTC m=+1069.836043486" watchObservedRunningTime="2026-01-20 16:49:11.592746796 +0000 UTC m=+1069.837351602"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.598718 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-q9nkf"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.830892 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-spc7x-config-9ndzj"]
Jan 20 16:49:11 crc kubenswrapper[4995]: E0120 16:49:11.831394 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c73eac42-022c-4605-8bd1-62e6d420a752" containerName="mariadb-account-create-update"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.831417 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c73eac42-022c-4605-8bd1-62e6d420a752" containerName="mariadb-account-create-update"
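
Stepping back to the two pod_startup_latency_tracker.go:104 entries above: they encode a simple relationship. podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A worked check of the rabbitmq-cell1-server-0 numbers, using the timestamps exactly as logged:

    // Recomputes the rabbitmq-cell1-server-0 startup durations from the
    // timestamps in the pod_startup_latency_tracker.go:104 entry above.
    package main

    import (
    	"fmt"
    	"time"
    )

    func mustParse(s string) time.Time {
    	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	created := mustParse("2026-01-20 16:48:12 +0000 UTC")
    	firstPull := mustParse("2026-01-20 16:48:25.445372493 +0000 UTC")
    	lastPull := mustParse("2026-01-20 16:48:34.197775936 +0000 UTC")
    	observed := mustParse("2026-01-20 16:49:11.545897136 +0000 UTC")

    	e2e := observed.Sub(created)         // 59.545897136s = podStartE2EDuration
    	slo := e2e - lastPull.Sub(firstPull) // minus 8.752403443s of pulling
    	fmt.Println(e2e, slo)                // slo = 50.793493693s, matching the log
    }
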
Jan 20 16:49:11 crc kubenswrapper[4995]: E0120 16:49:11.831446 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f955d94a-612b-4962-9745-ac012f2398b2" containerName="swift-ring-rebalance"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.831456 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f955d94a-612b-4962-9745-ac012f2398b2" containerName="swift-ring-rebalance"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.831655 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f955d94a-612b-4962-9745-ac012f2398b2" containerName="swift-ring-rebalance"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.831687 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c73eac42-022c-4605-8bd1-62e6d420a752" containerName="mariadb-account-create-update"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.832518 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x-config-9ndzj"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.838104 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.841435 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-spc7x-config-9ndzj"]
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.994073 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-log-ovn\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.994175 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run-ovn\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.994230 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxvmf\" (UniqueName: \"kubernetes.io/projected/62a5fefb-2d2f-4652-a7fe-d867dd941e12-kube-api-access-gxvmf\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.994295 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-scripts\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.994328 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj"
Jan 20 16:49:11 crc kubenswrapper[4995]: I0120 16:49:11.994357 4995 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-additional-scripts\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096115 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-log-ovn\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096178 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run-ovn\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096220 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxvmf\" (UniqueName: \"kubernetes.io/projected/62a5fefb-2d2f-4652-a7fe-d867dd941e12-kube-api-access-gxvmf\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096274 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-scripts\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096303 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-additional-scripts\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096487 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096488 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-log-ovn\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.096691 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run-ovn\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.097189 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-additional-scripts\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.098607 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-scripts\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.124200 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxvmf\" (UniqueName: \"kubernetes.io/projected/62a5fefb-2d2f-4652-a7fe-d867dd941e12-kube-api-access-gxvmf\") pod \"ovn-controller-spc7x-config-9ndzj\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.150353 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:12 crc kubenswrapper[4995]: I0120 16:49:12.591775 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-spc7x-config-9ndzj"] Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.263890 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-cxprs"] Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.267027 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.275824 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cxprs"] Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.277264 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.319342 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrrxb\" (UniqueName: \"kubernetes.io/projected/3e7511e3-04f6-4270-8b11-65bd6528e1dd-kube-api-access-lrrxb\") pod \"root-account-create-update-cxprs\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") " pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.319471 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e7511e3-04f6-4270-8b11-65bd6528e1dd-operator-scripts\") pod \"root-account-create-update-cxprs\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") " pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.421516 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e7511e3-04f6-4270-8b11-65bd6528e1dd-operator-scripts\") pod \"root-account-create-update-cxprs\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") " pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.421621 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrrxb\" (UniqueName: \"kubernetes.io/projected/3e7511e3-04f6-4270-8b11-65bd6528e1dd-kube-api-access-lrrxb\") pod \"root-account-create-update-cxprs\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") " pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.422785 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e7511e3-04f6-4270-8b11-65bd6528e1dd-operator-scripts\") pod \"root-account-create-update-cxprs\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") " pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.463825 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrrxb\" (UniqueName: \"kubernetes.io/projected/3e7511e3-04f6-4270-8b11-65bd6528e1dd-kube-api-access-lrrxb\") pod \"root-account-create-update-cxprs\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") " pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.559512 4995 generic.go:334] "Generic (PLEG): container finished" podID="62a5fefb-2d2f-4652-a7fe-d867dd941e12" containerID="c6deeff9c8a9e9703bdcba95dd92ae95ee07ee540ee18aaeae513b1d738df8ec" exitCode=0 Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.559556 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x-config-9ndzj" event={"ID":"62a5fefb-2d2f-4652-a7fe-d867dd941e12","Type":"ContainerDied","Data":"c6deeff9c8a9e9703bdcba95dd92ae95ee07ee540ee18aaeae513b1d738df8ec"} Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.559582 4995 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ovn-controller-spc7x-config-9ndzj" event={"ID":"62a5fefb-2d2f-4652-a7fe-d867dd941e12","Type":"ContainerStarted","Data":"2ce2642de9b99afb20c740cb090374f04a018288c2e91558312d0dbd998b5480"} Jan 20 16:49:13 crc kubenswrapper[4995]: I0120 16:49:13.634278 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cxprs" Jan 20 16:49:16 crc kubenswrapper[4995]: I0120 16:49:16.546489 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-spc7x" Jan 20 16:49:19 crc kubenswrapper[4995]: I0120 16:49:19.725489 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:19 crc kubenswrapper[4995]: I0120 16:49:19.728128 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:20 crc kubenswrapper[4995]: I0120 16:49:20.616911 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.076912 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171137 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-scripts\") pod \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171207 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxvmf\" (UniqueName: \"kubernetes.io/projected/62a5fefb-2d2f-4652-a7fe-d867dd941e12-kube-api-access-gxvmf\") pod \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171280 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-additional-scripts\") pod \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171363 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run-ovn\") pod \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171401 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-log-ovn\") pod \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171445 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run\") pod \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\" (UID: \"62a5fefb-2d2f-4652-a7fe-d867dd941e12\") " Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171928 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "62a5fefb-2d2f-4652-a7fe-d867dd941e12" (UID: "62a5fefb-2d2f-4652-a7fe-d867dd941e12"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.171999 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run" (OuterVolumeSpecName: "var-run") pod "62a5fefb-2d2f-4652-a7fe-d867dd941e12" (UID: "62a5fefb-2d2f-4652-a7fe-d867dd941e12"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.172020 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "62a5fefb-2d2f-4652-a7fe-d867dd941e12" (UID: "62a5fefb-2d2f-4652-a7fe-d867dd941e12"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.172104 4995 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.172915 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "62a5fefb-2d2f-4652-a7fe-d867dd941e12" (UID: "62a5fefb-2d2f-4652-a7fe-d867dd941e12"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.173030 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-scripts" (OuterVolumeSpecName: "scripts") pod "62a5fefb-2d2f-4652-a7fe-d867dd941e12" (UID: "62a5fefb-2d2f-4652-a7fe-d867dd941e12"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.175678 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62a5fefb-2d2f-4652-a7fe-d867dd941e12-kube-api-access-gxvmf" (OuterVolumeSpecName: "kube-api-access-gxvmf") pod "62a5fefb-2d2f-4652-a7fe-d867dd941e12" (UID: "62a5fefb-2d2f-4652-a7fe-d867dd941e12"). InnerVolumeSpecName "kube-api-access-gxvmf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.273130 4995 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-run\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.273163 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.273172 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxvmf\" (UniqueName: \"kubernetes.io/projected/62a5fefb-2d2f-4652-a7fe-d867dd941e12-kube-api-access-gxvmf\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.273182 4995 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/62a5fefb-2d2f-4652-a7fe-d867dd941e12-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.273190 4995 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/62a5fefb-2d2f-4652-a7fe-d867dd941e12-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.372566 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-cxprs"] Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.611734 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.619933 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f11d1ef-8720-4a15-91b7-2ad1602194f7-etc-swift\") pod \"swift-storage-0\" (UID: \"3f11d1ef-8720-4a15-91b7-2ad1602194f7\") " pod="openstack/swift-storage-0" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.622574 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-spc7x-config-9ndzj" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.625150 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x-config-9ndzj" event={"ID":"62a5fefb-2d2f-4652-a7fe-d867dd941e12","Type":"ContainerDied","Data":"2ce2642de9b99afb20c740cb090374f04a018288c2e91558312d0dbd998b5480"} Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.625210 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ce2642de9b99afb20c740cb090374f04a018288c2e91558312d0dbd998b5480" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.627481 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jdpl9" event={"ID":"6ecfbb2f-250c-4484-a20f-f45dce557abc","Type":"ContainerStarted","Data":"c86e63d95c4134414cc169f261f0f35f588248a3f48b521d54507b8864a03568"} Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.632681 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cxprs" event={"ID":"3e7511e3-04f6-4270-8b11-65bd6528e1dd","Type":"ContainerStarted","Data":"d3c51858ec15fcb97d24111291e64144788ada63c222c79602b2e2ae69d0004f"} Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.632732 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cxprs" event={"ID":"3e7511e3-04f6-4270-8b11-65bd6528e1dd","Type":"ContainerStarted","Data":"54ee1d9030c6d9161489d555877682549d88f8b078440e894b4c82c534aa4972"} Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.647710 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-jdpl9" podStartSLOduration=2.320263298 podStartE2EDuration="15.647689781s" podCreationTimestamp="2026-01-20 16:49:06 +0000 UTC" firstStartedPulling="2026-01-20 16:49:07.653633464 +0000 UTC m=+1065.898238280" lastFinishedPulling="2026-01-20 16:49:20.981059947 +0000 UTC m=+1079.225664763" observedRunningTime="2026-01-20 16:49:21.645408749 +0000 UTC m=+1079.890013555" watchObservedRunningTime="2026-01-20 16:49:21.647689781 +0000 UTC m=+1079.892294587" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.659271 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-cxprs" podStartSLOduration=8.659251714 podStartE2EDuration="8.659251714s" podCreationTimestamp="2026-01-20 16:49:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:21.656482019 +0000 UTC m=+1079.901086825" watchObservedRunningTime="2026-01-20 16:49:21.659251714 +0000 UTC m=+1079.903856520" Jan 20 16:49:21 crc kubenswrapper[4995]: I0120 16:49:21.763219 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.198146 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-spc7x-config-9ndzj"] Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.204117 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-spc7x-config-9ndzj"] Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.239070 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-spc7x-config-pghrb"] Jan 20 16:49:22 crc kubenswrapper[4995]: E0120 16:49:22.239416 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62a5fefb-2d2f-4652-a7fe-d867dd941e12" containerName="ovn-config" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.239433 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="62a5fefb-2d2f-4652-a7fe-d867dd941e12" containerName="ovn-config" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.239592 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="62a5fefb-2d2f-4652-a7fe-d867dd941e12" containerName="ovn-config" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.242035 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.243554 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.251718 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-spc7x-config-pghrb"] Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.325414 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-scripts\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.325545 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-log-ovn\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.325587 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-additional-scripts\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.325608 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgkzs\" (UniqueName: \"kubernetes.io/projected/3f25738b-1620-4598-985f-3907e8146fb8-kube-api-access-xgkzs\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.325668 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.325717 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run-ovn\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.337279 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427041 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-additional-scripts\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427126 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgkzs\" (UniqueName: \"kubernetes.io/projected/3f25738b-1620-4598-985f-3907e8146fb8-kube-api-access-xgkzs\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427190 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427266 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run-ovn\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427316 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-scripts\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427430 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-log-ovn\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427587 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-log-ovn\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 
16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427614 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run-ovn\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427591 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.427862 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-additional-scripts\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.429172 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-scripts\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.453320 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgkzs\" (UniqueName: \"kubernetes.io/projected/3f25738b-1620-4598-985f-3907e8146fb8-kube-api-access-xgkzs\") pod \"ovn-controller-spc7x-config-pghrb\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") " pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.556807 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-spc7x-config-pghrb" Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.639496 4995 generic.go:334] "Generic (PLEG): container finished" podID="3e7511e3-04f6-4270-8b11-65bd6528e1dd" containerID="d3c51858ec15fcb97d24111291e64144788ada63c222c79602b2e2ae69d0004f" exitCode=0 Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.639607 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cxprs" event={"ID":"3e7511e3-04f6-4270-8b11-65bd6528e1dd","Type":"ContainerDied","Data":"d3c51858ec15fcb97d24111291e64144788ada63c222c79602b2e2ae69d0004f"} Jan 20 16:49:22 crc kubenswrapper[4995]: I0120 16:49:22.641298 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"9468dce8192c8de2cf1b0c9d1175528596e0226918f539688d493ab45e1e65d6"} Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.083470 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-spc7x-config-pghrb"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.223322 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.316662 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.316910 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="prometheus" containerID="cri-o://69e9ca3a3017426111a09066f96d2d3197f0522063b303387009e21064ac1654" gracePeriod=600 Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.317265 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="thanos-sidecar" containerID="cri-o://675b42b9706a61119c8c1aa2e9fa1162313829e6a95281026daecaa77a72938a" gracePeriod=600 Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.317318 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="config-reloader" containerID="cri-o://57b2aae7cf41412fd19a4b06859ac07deff3ff9f6aa6a0b4bff8e0918cb8137d" gracePeriod=600 Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.516557 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-ncfj6"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.520261 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.521022 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.523182 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.525485 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-gd6b5" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.531258 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-ncfj6"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.653819 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-combined-ca-bundle\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.653907 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-config-data\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.654117 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-db-sync-config-data\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.654152 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdfl8\" (UniqueName: \"kubernetes.io/projected/e5603781-3cf3-41db-bfc7-7dc74d244fd4-kube-api-access-zdfl8\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.659957 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-p9tsn"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.660997 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.682187 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x-config-pghrb" event={"ID":"3f25738b-1620-4598-985f-3907e8146fb8","Type":"ContainerStarted","Data":"3cac2fdf7109f5ce096e604dffc348e8c542529f192af67a63494176e5f22513"} Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.682246 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x-config-pghrb" event={"ID":"3f25738b-1620-4598-985f-3907e8146fb8","Type":"ContainerStarted","Data":"f7c8aec8016182351ab8da15a47e0706115d9dbce6330fffe8904fe8394827fb"} Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.687445 4995 generic.go:334] "Generic (PLEG): container finished" podID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerID="675b42b9706a61119c8c1aa2e9fa1162313829e6a95281026daecaa77a72938a" exitCode=0 Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.687471 4995 generic.go:334] "Generic (PLEG): container finished" podID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerID="69e9ca3a3017426111a09066f96d2d3197f0522063b303387009e21064ac1654" exitCode=0 Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.687617 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerDied","Data":"675b42b9706a61119c8c1aa2e9fa1162313829e6a95281026daecaa77a72938a"} Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.687641 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerDied","Data":"69e9ca3a3017426111a09066f96d2d3197f0522063b303387009e21064ac1654"} Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.719330 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-p9tsn"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.748143 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-0ab3-account-create-update-rspxp"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.751001 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.758898 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-db-sync-config-data\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.758942 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdfl8\" (UniqueName: \"kubernetes.io/projected/e5603781-3cf3-41db-bfc7-7dc74d244fd4-kube-api-access-zdfl8\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.758976 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-combined-ca-bundle\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.759020 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-config-data\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.759069 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/deceef39-8330-4f41-acb1-fbb4ee4f7d80-operator-scripts\") pod \"cinder-db-create-p9tsn\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") " pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.759120 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmg8l\" (UniqueName: \"kubernetes.io/projected/deceef39-8330-4f41-acb1-fbb4ee4f7d80-kube-api-access-cmg8l\") pod \"cinder-db-create-p9tsn\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") " pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.759417 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.768999 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-combined-ca-bundle\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.775601 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0ab3-account-create-update-rspxp"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.776877 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-spc7x-config-pghrb" podStartSLOduration=1.776860066 podStartE2EDuration="1.776860066s" podCreationTimestamp="2026-01-20 16:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-20 16:49:23.730558551 +0000 UTC m=+1081.975163357" watchObservedRunningTime="2026-01-20 16:49:23.776860066 +0000 UTC m=+1082.021464862" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.784637 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-db-sync-config-data\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.787859 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdfl8\" (UniqueName: \"kubernetes.io/projected/e5603781-3cf3-41db-bfc7-7dc74d244fd4-kube-api-access-zdfl8\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.801516 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-config-data\") pod \"watcher-db-sync-ncfj6\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.847871 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-h9j24"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.848929 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.857096 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.859985 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmg8l\" (UniqueName: \"kubernetes.io/projected/deceef39-8330-4f41-acb1-fbb4ee4f7d80-kube-api-access-cmg8l\") pod \"cinder-db-create-p9tsn\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") " pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.860140 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-operator-scripts\") pod \"cinder-0ab3-account-create-update-rspxp\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") " pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.860172 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2nz4\" (UniqueName: \"kubernetes.io/projected/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-kube-api-access-d2nz4\") pod \"cinder-0ab3-account-create-update-rspxp\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") " pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.860190 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/deceef39-8330-4f41-acb1-fbb4ee4f7d80-operator-scripts\") pod \"cinder-db-create-p9tsn\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") " pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.860801 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/deceef39-8330-4f41-acb1-fbb4ee4f7d80-operator-scripts\") pod \"cinder-db-create-p9tsn\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") " pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.867499 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-h9j24"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.896411 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmg8l\" (UniqueName: \"kubernetes.io/projected/deceef39-8330-4f41-acb1-fbb4ee4f7d80-kube-api-access-cmg8l\") pod \"cinder-db-create-p9tsn\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") " pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.959514 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-mbl5g"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.961445 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdg5z\" (UniqueName: \"kubernetes.io/projected/555925e6-eb4f-4c45-9151-f44a6fee3874-kube-api-access-pdg5z\") pod \"barbican-db-create-h9j24\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") " pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.961513 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-operator-scripts\") pod \"cinder-0ab3-account-create-update-rspxp\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") " pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.961546 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2nz4\" (UniqueName: \"kubernetes.io/projected/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-kube-api-access-d2nz4\") pod \"cinder-0ab3-account-create-update-rspxp\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") " pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.961635 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/555925e6-eb4f-4c45-9151-f44a6fee3874-operator-scripts\") pod \"barbican-db-create-h9j24\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") " pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.962408 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-operator-scripts\") pod \"cinder-0ab3-account-create-update-rspxp\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") " pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.963001 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.983642 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mbl5g"] Jan 20 16:49:23 crc kubenswrapper[4995]: I0120 16:49:23.999457 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2nz4\" (UniqueName: \"kubernetes.io/projected/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-kube-api-access-d2nz4\") pod \"cinder-0ab3-account-create-update-rspxp\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") " pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.022726 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.040895 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62a5fefb-2d2f-4652-a7fe-d867dd941e12" path="/var/lib/kubelet/pods/62a5fefb-2d2f-4652-a7fe-d867dd941e12/volumes" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.041499 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-07ce-account-create-update-gkng9"] Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.043213 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-07ce-account-create-update-gkng9"] Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.043296 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.047810 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.062972 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/555925e6-eb4f-4c45-9151-f44a6fee3874-operator-scripts\") pod \"barbican-db-create-h9j24\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") " pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.063053 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2afb13bf-8898-4ec7-b9f1-036467eec7fd-operator-scripts\") pod \"neutron-db-create-mbl5g\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.063109 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdg5z\" (UniqueName: \"kubernetes.io/projected/555925e6-eb4f-4c45-9151-f44a6fee3874-kube-api-access-pdg5z\") pod \"barbican-db-create-h9j24\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") " pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.063182 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl2mw\" (UniqueName: \"kubernetes.io/projected/2afb13bf-8898-4ec7-b9f1-036467eec7fd-kube-api-access-pl2mw\") pod \"neutron-db-create-mbl5g\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.064219 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/555925e6-eb4f-4c45-9151-f44a6fee3874-operator-scripts\") pod \"barbican-db-create-h9j24\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") " pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.076308 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-j7gvz"] Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.077390 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.080923 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.081127 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.109927 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.113518 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-m8prt" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.113827 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.122631 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdg5z\" (UniqueName: \"kubernetes.io/projected/555925e6-eb4f-4c45-9151-f44a6fee3874-kube-api-access-pdg5z\") pod \"barbican-db-create-h9j24\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") " pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.162508 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-j7gvz"] Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.164301 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-combined-ca-bundle\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.164360 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-config-data\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.164412 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sq5x\" (UniqueName: \"kubernetes.io/projected/17f5be4a-fe5c-414f-b2af-3e06500135ba-kube-api-access-5sq5x\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.164458 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl2mw\" (UniqueName: \"kubernetes.io/projected/2afb13bf-8898-4ec7-b9f1-036467eec7fd-kube-api-access-pl2mw\") pod \"neutron-db-create-mbl5g\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc 
kubenswrapper[4995]: I0120 16:49:24.164526 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-operator-scripts\") pod \"neutron-07ce-account-create-update-gkng9\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") " pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.164576 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls4nj\" (UniqueName: \"kubernetes.io/projected/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-kube-api-access-ls4nj\") pod \"neutron-07ce-account-create-update-gkng9\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") " pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.164648 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2afb13bf-8898-4ec7-b9f1-036467eec7fd-operator-scripts\") pod \"neutron-db-create-mbl5g\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.165390 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2afb13bf-8898-4ec7-b9f1-036467eec7fd-operator-scripts\") pod \"neutron-db-create-mbl5g\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.181253 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-ee38-account-create-update-tg46v"] Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.182618 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.184473 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.187193 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.191812 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-ee38-account-create-update-tg46v"] Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.221832 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl2mw\" (UniqueName: \"kubernetes.io/projected/2afb13bf-8898-4ec7-b9f1-036467eec7fd-kube-api-access-pl2mw\") pod \"neutron-db-create-mbl5g\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.267937 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-combined-ca-bundle\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.267972 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-config-data\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.268009 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sq5x\" (UniqueName: \"kubernetes.io/projected/17f5be4a-fe5c-414f-b2af-3e06500135ba-kube-api-access-5sq5x\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.268041 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-operator-scripts\") pod \"barbican-ee38-account-create-update-tg46v\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.268087 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7zbh\" (UniqueName: \"kubernetes.io/projected/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-kube-api-access-j7zbh\") pod \"barbican-ee38-account-create-update-tg46v\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.268128 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-operator-scripts\") pod \"neutron-07ce-account-create-update-gkng9\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") " pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.268146 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls4nj\" (UniqueName: \"kubernetes.io/projected/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-kube-api-access-ls4nj\") pod \"neutron-07ce-account-create-update-gkng9\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") " pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 
16:49:24.272921 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-operator-scripts\") pod \"neutron-07ce-account-create-update-gkng9\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") " pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.276126 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-config-data\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.278484 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-combined-ca-bundle\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.297440 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls4nj\" (UniqueName: \"kubernetes.io/projected/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-kube-api-access-ls4nj\") pod \"neutron-07ce-account-create-update-gkng9\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") " pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.297449 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sq5x\" (UniqueName: \"kubernetes.io/projected/17f5be4a-fe5c-414f-b2af-3e06500135ba-kube-api-access-5sq5x\") pod \"keystone-db-sync-j7gvz\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.335693 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.373859 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-operator-scripts\") pod \"barbican-ee38-account-create-update-tg46v\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.373901 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7zbh\" (UniqueName: \"kubernetes.io/projected/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-kube-api-access-j7zbh\") pod \"barbican-ee38-account-create-update-tg46v\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.374654 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-07ce-account-create-update-gkng9"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.374696 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-operator-scripts\") pod \"barbican-ee38-account-create-update-tg46v\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " pod="openstack/barbican-ee38-account-create-update-tg46v"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.395331 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-j7gvz"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.400885 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7zbh\" (UniqueName: \"kubernetes.io/projected/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-kube-api-access-j7zbh\") pod \"barbican-ee38-account-create-update-tg46v\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " pod="openstack/barbican-ee38-account-create-update-tg46v"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.454141 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-ee38-account-create-update-tg46v"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.462775 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cxprs"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.576892 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrrxb\" (UniqueName: \"kubernetes.io/projected/3e7511e3-04f6-4270-8b11-65bd6528e1dd-kube-api-access-lrrxb\") pod \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") "
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.577239 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e7511e3-04f6-4270-8b11-65bd6528e1dd-operator-scripts\") pod \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\" (UID: \"3e7511e3-04f6-4270-8b11-65bd6528e1dd\") "
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.578280 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e7511e3-04f6-4270-8b11-65bd6528e1dd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e7511e3-04f6-4270-8b11-65bd6528e1dd" (UID: "3e7511e3-04f6-4270-8b11-65bd6528e1dd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.590288 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e7511e3-04f6-4270-8b11-65bd6528e1dd-kube-api-access-lrrxb" (OuterVolumeSpecName: "kube-api-access-lrrxb") pod "3e7511e3-04f6-4270-8b11-65bd6528e1dd" (UID: "3e7511e3-04f6-4270-8b11-65bd6528e1dd"). InnerVolumeSpecName "kube-api-access-lrrxb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.630558 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-ncfj6"]
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.679005 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrrxb\" (UniqueName: \"kubernetes.io/projected/3e7511e3-04f6-4270-8b11-65bd6528e1dd-kube-api-access-lrrxb\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.679031 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e7511e3-04f6-4270-8b11-65bd6528e1dd-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.698481 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-cxprs"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.698478 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-cxprs" event={"ID":"3e7511e3-04f6-4270-8b11-65bd6528e1dd","Type":"ContainerDied","Data":"54ee1d9030c6d9161489d555877682549d88f8b078440e894b4c82c534aa4972"}
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.698588 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54ee1d9030c6d9161489d555877682549d88f8b078440e894b4c82c534aa4972"
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.700861 4995 generic.go:334] "Generic (PLEG): container finished" podID="3f25738b-1620-4598-985f-3907e8146fb8" containerID="3cac2fdf7109f5ce096e604dffc348e8c542529f192af67a63494176e5f22513" exitCode=0
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.700949 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x-config-pghrb" event={"ID":"3f25738b-1620-4598-985f-3907e8146fb8","Type":"ContainerDied","Data":"3cac2fdf7109f5ce096e604dffc348e8c542529f192af67a63494176e5f22513"}
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.706779 4995 generic.go:334] "Generic (PLEG): container finished" podID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerID="57b2aae7cf41412fd19a4b06859ac07deff3ff9f6aa6a0b4bff8e0918cb8137d" exitCode=0
Jan 20 16:49:24 crc kubenswrapper[4995]: I0120 16:49:24.706816 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerDied","Data":"57b2aae7cf41412fd19a4b06859ac07deff3ff9f6aa6a0b4bff8e0918cb8137d"}
Jan 20 16:49:24 crc kubenswrapper[4995]: W0120 16:49:24.932005 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5603781_3cf3_41db_bfc7_7dc74d244fd4.slice/crio-f77b9403f8afb7551803eaf49a03d040951c15202bf4652cb444daa03b5f6302 WatchSource:0}: Error finding container f77b9403f8afb7551803eaf49a03d040951c15202bf4652cb444daa03b5f6302: Status 404 returned error can't find the container with id f77b9403f8afb7551803eaf49a03d040951c15202bf4652cb444daa03b5f6302
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.258781 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.261297 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-p9tsn"]
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.405374 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406139 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-tls-assets\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406331 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406384 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-0\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406408 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-1\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406446 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlp5p\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-kube-api-access-zlp5p\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406490 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-web-config\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406507 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-thanos-prometheus-http-client-file\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406526 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-2\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.406603 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config-out\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.407247 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.417986 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.421874 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.440263 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-kube-api-access-zlp5p" (OuterVolumeSpecName: "kube-api-access-zlp5p") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "kube-api-access-zlp5p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.452512 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.452683 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config-out" (OuterVolumeSpecName: "config-out") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.484325 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config" (OuterVolumeSpecName: "config") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.484464 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.485120 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-web-config" (OuterVolumeSpecName: "web-config") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:49:25 crc kubenswrapper[4995]: E0120 16:49:25.493604 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f podName:f96dc8ae-f537-4dc4-b6a5-62964c9b40ee nodeName:}" failed. No retries permitted until 2026-01-20 16:49:25.993581175 +0000 UTC m=+1084.238185981 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "prometheus-metric-storage-db" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee") : kubernetes.io/csi: Unmounter.TearDownAt failed: rpc error: code = Unknown desc = check target path: could not get consistent content of /proc/mounts after 3 attempts
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518347 4995 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config-out\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518372 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518382 4995 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-tls-assets\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518392 4995 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518403 4995 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518412 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlp5p\" (UniqueName: \"kubernetes.io/projected/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-kube-api-access-zlp5p\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518424 4995 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-web-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518432 4995 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.518441 4995 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.580233 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-j7gvz"]
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.711177 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-h9j24"]
Jan 20 16:49:25 crc kubenswrapper[4995]: W0120 16:49:25.729176 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod555925e6_eb4f_4c45_9151_f44a6fee3874.slice/crio-35b60f487903db251e3f64957fc88010ccb06de67fa3322c2896c27bec378816 WatchSource:0}: Error finding container 35b60f487903db251e3f64957fc88010ccb06de67fa3322c2896c27bec378816: Status 404 returned error can't find the container with id 35b60f487903db251e3f64957fc88010ccb06de67fa3322c2896c27bec378816
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.731974 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-07ce-account-create-update-gkng9"]
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.740648 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-0ab3-account-create-update-rspxp"]
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.756224 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-mbl5g"]
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.767605 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-ncfj6" event={"ID":"e5603781-3cf3-41db-bfc7-7dc74d244fd4","Type":"ContainerStarted","Data":"f77b9403f8afb7551803eaf49a03d040951c15202bf4652cb444daa03b5f6302"}
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.793255 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-ee38-account-create-update-tg46v"]
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.802160 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"b990bd287590c2b7238c01fdac753a8149fe4dbb214307bb010e0a2e1442cbeb"}
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.834583 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-p9tsn" event={"ID":"deceef39-8330-4f41-acb1-fbb4ee4f7d80","Type":"ContainerStarted","Data":"6a12cf77df1493bdbbf30cd2ea25adbd39efa775b0c1580b867f19e962352b86"}
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.837234 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j7gvz" event={"ID":"17f5be4a-fe5c-414f-b2af-3e06500135ba","Type":"ContainerStarted","Data":"373dc034649074fc0f2479f8a3c01e0138a9af436755de9b089f0ccc5e5fa823"}
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.855274 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-p9tsn" podStartSLOduration=2.855256896 podStartE2EDuration="2.855256896s" podCreationTimestamp="2026-01-20 16:49:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:25.848982466 +0000 UTC m=+1084.093587272" watchObservedRunningTime="2026-01-20 16:49:25.855256896 +0000 UTC m=+1084.099861702"
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.855934 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.855955 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee","Type":"ContainerDied","Data":"a18665aca8896dbe98c6ebad2216d9532b6f5e3f2f2c257305344d38743787b6"}
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.856032 4995 scope.go:117] "RemoveContainer" containerID="675b42b9706a61119c8c1aa2e9fa1162313829e6a95281026daecaa77a72938a"
Jan 20 16:49:25 crc kubenswrapper[4995]: W0120 16:49:25.901369 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd799892_ab5d_43c3_aa1e_df5407ed9d7c.slice/crio-8ef95d2d1f5bd3efe9d62071ae76f28371a8850b5f79027f2d80364b446538cd WatchSource:0}: Error finding container 8ef95d2d1f5bd3efe9d62071ae76f28371a8850b5f79027f2d80364b446538cd: Status 404 returned error can't find the container with id 8ef95d2d1f5bd3efe9d62071ae76f28371a8850b5f79027f2d80364b446538cd
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.923860 4995 scope.go:117] "RemoveContainer" containerID="57b2aae7cf41412fd19a4b06859ac07deff3ff9f6aa6a0b4bff8e0918cb8137d"
Jan 20 16:49:25 crc kubenswrapper[4995]: I0120 16:49:25.971960 4995 scope.go:117] "RemoveContainer" containerID="69e9ca3a3017426111a09066f96d2d3197f0522063b303387009e21064ac1654"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.053928 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\" (UID: \"f96dc8ae-f537-4dc4-b6a5-62964c9b40ee\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.174924 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" (UID: "f96dc8ae-f537-4dc4-b6a5-62964c9b40ee"). InnerVolumeSpecName "pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.259273 4995 scope.go:117] "RemoveContainer" containerID="d84b9b127c5fc1c34bfdadf3f2dc3ede4fd463384fdb78adf81799fb76219c66"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.262031 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") on node \"crc\" "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.309766 4995 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.309953 4995 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f") on node "crc"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.370860 4995 reconciler_common.go:293] "Volume detached for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.372499 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.386956 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.401907 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 20 16:49:26 crc kubenswrapper[4995]: E0120 16:49:26.402374 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="config-reloader"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402388 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="config-reloader"
Jan 20 16:49:26 crc kubenswrapper[4995]: E0120 16:49:26.402417 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="init-config-reloader"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402423 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="init-config-reloader"
Jan 20 16:49:26 crc kubenswrapper[4995]: E0120 16:49:26.402434 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="prometheus"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402440 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="prometheus"
Jan 20 16:49:26 crc kubenswrapper[4995]: E0120 16:49:26.402452 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e7511e3-04f6-4270-8b11-65bd6528e1dd" containerName="mariadb-account-create-update"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402457 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e7511e3-04f6-4270-8b11-65bd6528e1dd" containerName="mariadb-account-create-update"
Jan 20 16:49:26 crc kubenswrapper[4995]: E0120 16:49:26.402466 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="thanos-sidecar"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402472 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="thanos-sidecar"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402624 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="prometheus"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402639 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="thanos-sidecar"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402646 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e7511e3-04f6-4270-8b11-65bd6528e1dd" containerName="mariadb-account-create-update"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.402656 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="config-reloader"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.404116 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.407966 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-58l4k"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.408344 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.408536 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.408704 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.409589 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.409752 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.410539 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.411529 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.437684 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.443684 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573116 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28k5z\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-kube-api-access-28k5z\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573178 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573198 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573217 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573315 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573338 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573354 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573373 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573397 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ff6bba1f-8556-411d-bba9-b0274703ffea-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573415 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573460 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573481 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-config\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.573503 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.617001 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x-config-pghrb"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.675435 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.675540 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.675575 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.675630 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.676048 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ff6bba1f-8556-411d-bba9-b0274703ffea-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.676229 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.676323 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.676381 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-config\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.676516 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.677270 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.677315 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.678232 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28k5z\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-kube-api-access-28k5z\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.678332 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.678360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.678390 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.678433 4995 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.678455 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a3a36306f7d3d2f24937466925b0b10e100df05e864ec7bc951230e86c72f354/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.679111 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.685146 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ff6bba1f-8556-411d-bba9-b0274703ffea-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.686404 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.691822 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.703109 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-config\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.703395 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.703505 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.706657 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.709658 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.709983 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28k5z\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-kube-api-access-28k5z\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.728216 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.779850 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-scripts\") pod \"3f25738b-1620-4598-985f-3907e8146fb8\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.779955 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-log-ovn\") pod \"3f25738b-1620-4598-985f-3907e8146fb8\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.779990 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run\") pod \"3f25738b-1620-4598-985f-3907e8146fb8\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780088 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgkzs\" (UniqueName: \"kubernetes.io/projected/3f25738b-1620-4598-985f-3907e8146fb8-kube-api-access-xgkzs\") pod \"3f25738b-1620-4598-985f-3907e8146fb8\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780139 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-additional-scripts\") pod \"3f25738b-1620-4598-985f-3907e8146fb8\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780180 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run-ovn\") pod \"3f25738b-1620-4598-985f-3907e8146fb8\" (UID: \"3f25738b-1620-4598-985f-3907e8146fb8\") "
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780254 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run" (OuterVolumeSpecName: "var-run") pod "3f25738b-1620-4598-985f-3907e8146fb8" (UID: "3f25738b-1620-4598-985f-3907e8146fb8"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780319 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "3f25738b-1620-4598-985f-3907e8146fb8" (UID: "3f25738b-1620-4598-985f-3907e8146fb8"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780605 4995 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.780625 4995 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.781331 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-scripts" (OuterVolumeSpecName: "scripts") pod "3f25738b-1620-4598-985f-3907e8146fb8" (UID: "3f25738b-1620-4598-985f-3907e8146fb8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.784991 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f25738b-1620-4598-985f-3907e8146fb8-kube-api-access-xgkzs" (OuterVolumeSpecName: "kube-api-access-xgkzs") pod "3f25738b-1620-4598-985f-3907e8146fb8" (UID: "3f25738b-1620-4598-985f-3907e8146fb8"). InnerVolumeSpecName "kube-api-access-xgkzs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.781496 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "3f25738b-1620-4598-985f-3907e8146fb8" (UID: "3f25738b-1620-4598-985f-3907e8146fb8"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.786326 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "3f25738b-1620-4598-985f-3907e8146fb8" (UID: "3f25738b-1620-4598-985f-3907e8146fb8"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.866139 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"7621ca45b171767d3768fdeb7d9bab88c7d07c8631b19dbb7ddc4d3faa03d075"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.866181 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"d294292d1cc27f8dffffacd0a8ed669760ae729d593e8032e6784ac984a242b6"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.868342 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-ee38-account-create-update-tg46v" event={"ID":"cd799892-ab5d-43c3-aa1e-df5407ed9d7c","Type":"ContainerStarted","Data":"e6ee1ad2b9244080d618f0cad4d19e5883b2c3953baad4040c3c13f12ec62081"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.868370 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-ee38-account-create-update-tg46v" event={"ID":"cd799892-ab5d-43c3-aa1e-df5407ed9d7c","Type":"ContainerStarted","Data":"8ef95d2d1f5bd3efe9d62071ae76f28371a8850b5f79027f2d80364b446538cd"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.871844 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mbl5g" event={"ID":"2afb13bf-8898-4ec7-b9f1-036467eec7fd","Type":"ContainerStarted","Data":"81d167fedd3c0d73e6e9e8e54faca62bc2b909076389f6f205cb9d888ab130e9"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.871881 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mbl5g" event={"ID":"2afb13bf-8898-4ec7-b9f1-036467eec7fd","Type":"ContainerStarted","Data":"1cce59c6cdb53e0629954f993e1e360fdc0781755b9761a50dae18ec428b1fa9"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.878631 4995 generic.go:334] "Generic (PLEG): container finished" podID="555925e6-eb4f-4c45-9151-f44a6fee3874" containerID="97818658943d0c9c4b4ed1df2b1278e3f8dfe0d282ab3724994c2ec013c7d058" exitCode=0
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.878706 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h9j24" event={"ID":"555925e6-eb4f-4c45-9151-f44a6fee3874","Type":"ContainerDied","Data":"97818658943d0c9c4b4ed1df2b1278e3f8dfe0d282ab3724994c2ec013c7d058"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.878735 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h9j24" event={"ID":"555925e6-eb4f-4c45-9151-f44a6fee3874","Type":"ContainerStarted","Data":"35b60f487903db251e3f64957fc88010ccb06de67fa3322c2896c27bec378816"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.881846 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgkzs\" (UniqueName: \"kubernetes.io/projected/3f25738b-1620-4598-985f-3907e8146fb8-kube-api-access-xgkzs\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.881880 4995 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.881895 4995 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3f25738b-1620-4598-985f-3907e8146fb8-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.881909 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f25738b-1620-4598-985f-3907e8146fb8-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.882044 4995 generic.go:334] "Generic (PLEG): container finished" podID="a97f544e-7335-4e51-9bfd-9c92bcd12cc6" containerID="f4f276489b3e827a855e4ff5a98bda8834902cd61645bbfca799795aca95cc9f" exitCode=0
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.882145 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-07ce-account-create-update-gkng9" event={"ID":"a97f544e-7335-4e51-9bfd-9c92bcd12cc6","Type":"ContainerDied","Data":"f4f276489b3e827a855e4ff5a98bda8834902cd61645bbfca799795aca95cc9f"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.882183 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-07ce-account-create-update-gkng9" event={"ID":"a97f544e-7335-4e51-9bfd-9c92bcd12cc6","Type":"ContainerStarted","Data":"99f3978167175dffab13814033efa3392731e6c83a4f420da3194d9694f74f5f"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.886218 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-ee38-account-create-update-tg46v" podStartSLOduration=2.886199762 podStartE2EDuration="2.886199762s" podCreationTimestamp="2026-01-20 16:49:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:26.882582643 +0000 UTC m=+1085.127187469" watchObservedRunningTime="2026-01-20 16:49:26.886199762 +0000 UTC m=+1085.130804568"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.906440 4995 generic.go:334] "Generic (PLEG): container finished" podID="deceef39-8330-4f41-acb1-fbb4ee4f7d80" containerID="0403347ee00cbd0387e8082ffe31741e537067b2c547b0481045e7732d0c41c0" exitCode=0
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.906507 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-p9tsn" event={"ID":"deceef39-8330-4f41-acb1-fbb4ee4f7d80","Type":"ContainerDied","Data":"0403347ee00cbd0387e8082ffe31741e537067b2c547b0481045e7732d0c41c0"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.915091 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.920234 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-spc7x-config-pghrb" event={"ID":"3f25738b-1620-4598-985f-3907e8146fb8","Type":"ContainerDied","Data":"f7c8aec8016182351ab8da15a47e0706115d9dbce6330fffe8904fe8394827fb"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.920289 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7c8aec8016182351ab8da15a47e0706115d9dbce6330fffe8904fe8394827fb"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.920387 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-spc7x-config-pghrb"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.932220 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-mbl5g" podStartSLOduration=3.932203338 podStartE2EDuration="3.932203338s" podCreationTimestamp="2026-01-20 16:49:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:26.929491064 +0000 UTC m=+1085.174095870" watchObservedRunningTime="2026-01-20 16:49:26.932203338 +0000 UTC m=+1085.176808144"
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.944114 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0ab3-account-create-update-rspxp" event={"ID":"baf5a0b0-860c-4777-bc84-f6dc4a17af4c","Type":"ContainerStarted","Data":"c498f7985f5ddf07870c6600a5528663f07a4f083a655568edce4766a64d167c"}
Jan 20 16:49:26 crc kubenswrapper[4995]: I0120 16:49:26.944165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0ab3-account-create-update-rspxp" event={"ID":"baf5a0b0-860c-4777-bc84-f6dc4a17af4c","Type":"ContainerStarted","Data":"b2bbafff632be39ad53e7fc56ceaada211567860fb09e84049ebe58ae04fe7d9"}
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.489017 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 20 16:49:27 crc kubenswrapper[4995]: W0120 16:49:27.493041 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff6bba1f_8556_411d_bba9_b0274703ffea.slice/crio-cc12bb5667e9b3460aa9afe50fd053766d71bc6557fc5b07f5ea4dd7c1bb72fb WatchSource:0}: Error finding container cc12bb5667e9b3460aa9afe50fd053766d71bc6557fc5b07f5ea4dd7c1bb72fb: Status 404 returned error can't find the container with id cc12bb5667e9b3460aa9afe50fd053766d71bc6557fc5b07f5ea4dd7c1bb72fb
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.686030 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-spc7x-config-pghrb"]
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.696582 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-spc7x-config-pghrb"]
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.725608 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.111:9090/-/ready\": dial tcp 10.217.0.111:9090: i/o timeout (Client.Timeout exceeded while awaiting headers)"
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.959977 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"ac52bd50b44531ca3c1dc1546ffde4c0fbb0cdaf6c1ddd38df4480139f79b67a"}
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.964566 4995 generic.go:334] "Generic (PLEG): container finished" podID="cd799892-ab5d-43c3-aa1e-df5407ed9d7c" containerID="e6ee1ad2b9244080d618f0cad4d19e5883b2c3953baad4040c3c13f12ec62081" exitCode=0
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.964659 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-ee38-account-create-update-tg46v" event={"ID":"cd799892-ab5d-43c3-aa1e-df5407ed9d7c","Type":"ContainerDied","Data":"e6ee1ad2b9244080d618f0cad4d19e5883b2c3953baad4040c3c13f12ec62081"}
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.972374 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerStarted","Data":"cc12bb5667e9b3460aa9afe50fd053766d71bc6557fc5b07f5ea4dd7c1bb72fb"}
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.975764 4995 generic.go:334] "Generic (PLEG): container finished" podID="2afb13bf-8898-4ec7-b9f1-036467eec7fd" containerID="81d167fedd3c0d73e6e9e8e54faca62bc2b909076389f6f205cb9d888ab130e9" exitCode=0
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.975832 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mbl5g" event={"ID":"2afb13bf-8898-4ec7-b9f1-036467eec7fd","Type":"ContainerDied","Data":"81d167fedd3c0d73e6e9e8e54faca62bc2b909076389f6f205cb9d888ab130e9"}
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.978181 4995 generic.go:334] "Generic (PLEG): container finished" podID="baf5a0b0-860c-4777-bc84-f6dc4a17af4c" containerID="c498f7985f5ddf07870c6600a5528663f07a4f083a655568edce4766a64d167c" exitCode=0
Jan 20 16:49:27 crc kubenswrapper[4995]: I0120 16:49:27.978283 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0ab3-account-create-update-rspxp" event={"ID":"baf5a0b0-860c-4777-bc84-f6dc4a17af4c","Type":"ContainerDied","Data":"c498f7985f5ddf07870c6600a5528663f07a4f083a655568edce4766a64d167c"}
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.002262 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f25738b-1620-4598-985f-3907e8146fb8" path="/var/lib/kubelet/pods/3f25738b-1620-4598-985f-3907e8146fb8/volumes"
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.003135 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f96dc8ae-f537-4dc4-b6a5-62964c9b40ee" path="/var/lib/kubelet/pods/f96dc8ae-f537-4dc4-b6a5-62964c9b40ee/volumes"
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.454726 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0ab3-account-create-update-rspxp"
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.521484 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-operator-scripts\") pod \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.521606 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2nz4\" (UniqueName: \"kubernetes.io/projected/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-kube-api-access-d2nz4\") pod \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\" (UID: \"baf5a0b0-860c-4777-bc84-f6dc4a17af4c\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.522992 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "baf5a0b0-860c-4777-bc84-f6dc4a17af4c" (UID: "baf5a0b0-860c-4777-bc84-f6dc4a17af4c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.534903 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-kube-api-access-d2nz4" (OuterVolumeSpecName: "kube-api-access-d2nz4") pod "baf5a0b0-860c-4777-bc84-f6dc4a17af4c" (UID: "baf5a0b0-860c-4777-bc84-f6dc4a17af4c"). InnerVolumeSpecName "kube-api-access-d2nz4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.626255 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.626289 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2nz4\" (UniqueName: \"kubernetes.io/projected/baf5a0b0-860c-4777-bc84-f6dc4a17af4c-kube-api-access-d2nz4\") on node \"crc\" DevicePath \"\""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.681104 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h9j24"
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.715522 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-p9tsn"
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.718165 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-07ce-account-create-update-gkng9"
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.831440 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmg8l\" (UniqueName: \"kubernetes.io/projected/deceef39-8330-4f41-acb1-fbb4ee4f7d80-kube-api-access-cmg8l\") pod \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.831492 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/deceef39-8330-4f41-acb1-fbb4ee4f7d80-operator-scripts\") pod \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\" (UID: \"deceef39-8330-4f41-acb1-fbb4ee4f7d80\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.831559 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdg5z\" (UniqueName: \"kubernetes.io/projected/555925e6-eb4f-4c45-9151-f44a6fee3874-kube-api-access-pdg5z\") pod \"555925e6-eb4f-4c45-9151-f44a6fee3874\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.831651 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-operator-scripts\") pod \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.831692 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls4nj\" (UniqueName: \"kubernetes.io/projected/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-kube-api-access-ls4nj\") pod \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\" (UID: \"a97f544e-7335-4e51-9bfd-9c92bcd12cc6\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.831742 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/555925e6-eb4f-4c45-9151-f44a6fee3874-operator-scripts\") pod \"555925e6-eb4f-4c45-9151-f44a6fee3874\" (UID: \"555925e6-eb4f-4c45-9151-f44a6fee3874\") "
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.832637 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/555925e6-eb4f-4c45-9151-f44a6fee3874-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "555925e6-eb4f-4c45-9151-f44a6fee3874" (UID: "555925e6-eb4f-4c45-9151-f44a6fee3874"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.832654 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deceef39-8330-4f41-acb1-fbb4ee4f7d80-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "deceef39-8330-4f41-acb1-fbb4ee4f7d80" (UID: "deceef39-8330-4f41-acb1-fbb4ee4f7d80"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.833173 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a97f544e-7335-4e51-9bfd-9c92bcd12cc6" (UID: "a97f544e-7335-4e51-9bfd-9c92bcd12cc6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.837643 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/deceef39-8330-4f41-acb1-fbb4ee4f7d80-kube-api-access-cmg8l" (OuterVolumeSpecName: "kube-api-access-cmg8l") pod "deceef39-8330-4f41-acb1-fbb4ee4f7d80" (UID: "deceef39-8330-4f41-acb1-fbb4ee4f7d80"). InnerVolumeSpecName "kube-api-access-cmg8l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.838119 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/555925e6-eb4f-4c45-9151-f44a6fee3874-kube-api-access-pdg5z" (OuterVolumeSpecName: "kube-api-access-pdg5z") pod "555925e6-eb4f-4c45-9151-f44a6fee3874" (UID: "555925e6-eb4f-4c45-9151-f44a6fee3874"). InnerVolumeSpecName "kube-api-access-pdg5z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.838952 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-kube-api-access-ls4nj" (OuterVolumeSpecName: "kube-api-access-ls4nj") pod "a97f544e-7335-4e51-9bfd-9c92bcd12cc6" (UID: "a97f544e-7335-4e51-9bfd-9c92bcd12cc6"). InnerVolumeSpecName "kube-api-access-ls4nj".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.934292 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/555925e6-eb4f-4c45-9151-f44a6fee3874-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.934337 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/deceef39-8330-4f41-acb1-fbb4ee4f7d80-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.934350 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmg8l\" (UniqueName: \"kubernetes.io/projected/deceef39-8330-4f41-acb1-fbb4ee4f7d80-kube-api-access-cmg8l\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.934397 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdg5z\" (UniqueName: \"kubernetes.io/projected/555925e6-eb4f-4c45-9151-f44a6fee3874-kube-api-access-pdg5z\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.934411 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.934424 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls4nj\" (UniqueName: \"kubernetes.io/projected/a97f544e-7335-4e51-9bfd-9c92bcd12cc6-kube-api-access-ls4nj\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.991102 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-h9j24" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.991087 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-h9j24" event={"ID":"555925e6-eb4f-4c45-9151-f44a6fee3874","Type":"ContainerDied","Data":"35b60f487903db251e3f64957fc88010ccb06de67fa3322c2896c27bec378816"} Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.991230 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35b60f487903db251e3f64957fc88010ccb06de67fa3322c2896c27bec378816" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.992713 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-07ce-account-create-update-gkng9" event={"ID":"a97f544e-7335-4e51-9bfd-9c92bcd12cc6","Type":"ContainerDied","Data":"99f3978167175dffab13814033efa3392731e6c83a4f420da3194d9694f74f5f"} Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.992998 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99f3978167175dffab13814033efa3392731e6c83a4f420da3194d9694f74f5f" Jan 20 16:49:28 crc kubenswrapper[4995]: I0120 16:49:28.992730 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-07ce-account-create-update-gkng9" Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.005557 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-0ab3-account-create-update-rspxp" event={"ID":"baf5a0b0-860c-4777-bc84-f6dc4a17af4c","Type":"ContainerDied","Data":"b2bbafff632be39ad53e7fc56ceaada211567860fb09e84049ebe58ae04fe7d9"} Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.005595 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2bbafff632be39ad53e7fc56ceaada211567860fb09e84049ebe58ae04fe7d9" Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.005568 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-0ab3-account-create-update-rspxp" Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.009029 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-p9tsn" event={"ID":"deceef39-8330-4f41-acb1-fbb4ee4f7d80","Type":"ContainerDied","Data":"6a12cf77df1493bdbbf30cd2ea25adbd39efa775b0c1580b867f19e962352b86"} Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.009107 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a12cf77df1493bdbbf30cd2ea25adbd39efa775b0c1580b867f19e962352b86" Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.009120 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-p9tsn" Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.632919 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-cxprs"] Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.639474 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-cxprs"] Jan 20 16:49:29 crc kubenswrapper[4995]: I0120 16:49:29.999875 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e7511e3-04f6-4270-8b11-65bd6528e1dd" path="/var/lib/kubelet/pods/3e7511e3-04f6-4270-8b11-65bd6528e1dd/volumes" Jan 20 16:49:30 crc kubenswrapper[4995]: I0120 16:49:30.020149 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerStarted","Data":"eea83ddf100f67421593176e2e56fe13438674fd192650c7c0c5a223a509466e"} Jan 20 16:49:30 crc kubenswrapper[4995]: I0120 16:49:30.571854 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:49:30 crc kubenswrapper[4995]: I0120 16:49:30.572346 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:49:32 crc kubenswrapper[4995]: I0120 16:49:32.051037 4995 generic.go:334] "Generic (PLEG): container finished" podID="6ecfbb2f-250c-4484-a20f-f45dce557abc" containerID="c86e63d95c4134414cc169f261f0f35f588248a3f48b521d54507b8864a03568" exitCode=0 Jan 20 16:49:32 crc kubenswrapper[4995]: I0120 16:49:32.051150 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-db-sync-jdpl9" event={"ID":"6ecfbb2f-250c-4484-a20f-f45dce557abc","Type":"ContainerDied","Data":"c86e63d95c4134414cc169f261f0f35f588248a3f48b521d54507b8864a03568"} Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.662213 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-f68pw"] Jan 20 16:49:34 crc kubenswrapper[4995]: E0120 16:49:34.663175 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f25738b-1620-4598-985f-3907e8146fb8" containerName="ovn-config" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663197 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f25738b-1620-4598-985f-3907e8146fb8" containerName="ovn-config" Jan 20 16:49:34 crc kubenswrapper[4995]: E0120 16:49:34.663220 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="555925e6-eb4f-4c45-9151-f44a6fee3874" containerName="mariadb-database-create" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663231 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="555925e6-eb4f-4c45-9151-f44a6fee3874" containerName="mariadb-database-create" Jan 20 16:49:34 crc kubenswrapper[4995]: E0120 16:49:34.663254 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deceef39-8330-4f41-acb1-fbb4ee4f7d80" containerName="mariadb-database-create" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663265 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="deceef39-8330-4f41-acb1-fbb4ee4f7d80" containerName="mariadb-database-create" Jan 20 16:49:34 crc kubenswrapper[4995]: E0120 16:49:34.663299 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97f544e-7335-4e51-9bfd-9c92bcd12cc6" containerName="mariadb-account-create-update" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663310 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97f544e-7335-4e51-9bfd-9c92bcd12cc6" containerName="mariadb-account-create-update" Jan 20 16:49:34 crc kubenswrapper[4995]: E0120 16:49:34.663334 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baf5a0b0-860c-4777-bc84-f6dc4a17af4c" containerName="mariadb-account-create-update" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663345 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="baf5a0b0-860c-4777-bc84-f6dc4a17af4c" containerName="mariadb-account-create-update" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663591 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="deceef39-8330-4f41-acb1-fbb4ee4f7d80" containerName="mariadb-database-create" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663619 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="baf5a0b0-860c-4777-bc84-f6dc4a17af4c" containerName="mariadb-account-create-update" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663635 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f25738b-1620-4598-985f-3907e8146fb8" containerName="ovn-config" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663667 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="555925e6-eb4f-4c45-9151-f44a6fee3874" containerName="mariadb-database-create" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.663685 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a97f544e-7335-4e51-9bfd-9c92bcd12cc6" containerName="mariadb-account-create-update" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.664602 4995 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.666514 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.671824 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-f68pw"] Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.750194 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-operator-scripts\") pod \"root-account-create-update-f68pw\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.750612 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wwb9\" (UniqueName: \"kubernetes.io/projected/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-kube-api-access-2wwb9\") pod \"root-account-create-update-f68pw\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.852721 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-operator-scripts\") pod \"root-account-create-update-f68pw\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.852780 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wwb9\" (UniqueName: \"kubernetes.io/projected/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-kube-api-access-2wwb9\") pod \"root-account-create-update-f68pw\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.853737 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-operator-scripts\") pod \"root-account-create-update-f68pw\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.871475 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wwb9\" (UniqueName: \"kubernetes.io/projected/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-kube-api-access-2wwb9\") pod \"root-account-create-update-f68pw\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:34 crc kubenswrapper[4995]: I0120 16:49:34.990140 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:35 crc kubenswrapper[4995]: I0120 16:49:35.929916 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:35 crc kubenswrapper[4995]: I0120 16:49:35.934608 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:35 crc kubenswrapper[4995]: I0120 16:49:35.951798 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108631 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2afb13bf-8898-4ec7-b9f1-036467eec7fd-operator-scripts\") pod \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108754 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pl2mw\" (UniqueName: \"kubernetes.io/projected/2afb13bf-8898-4ec7-b9f1-036467eec7fd-kube-api-access-pl2mw\") pod \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\" (UID: \"2afb13bf-8898-4ec7-b9f1-036467eec7fd\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108841 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-config-data\") pod \"6ecfbb2f-250c-4484-a20f-f45dce557abc\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108882 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-db-sync-config-data\") pod \"6ecfbb2f-250c-4484-a20f-f45dce557abc\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108910 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-combined-ca-bundle\") pod \"6ecfbb2f-250c-4484-a20f-f45dce557abc\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108953 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7zbh\" (UniqueName: \"kubernetes.io/projected/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-kube-api-access-j7zbh\") pod \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.108982 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzxjw\" (UniqueName: \"kubernetes.io/projected/6ecfbb2f-250c-4484-a20f-f45dce557abc-kube-api-access-dzxjw\") pod \"6ecfbb2f-250c-4484-a20f-f45dce557abc\" (UID: \"6ecfbb2f-250c-4484-a20f-f45dce557abc\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.109006 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-operator-scripts\") pod \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\" (UID: \"cd799892-ab5d-43c3-aa1e-df5407ed9d7c\") " Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.110026 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cd799892-ab5d-43c3-aa1e-df5407ed9d7c" (UID: "cd799892-ab5d-43c3-aa1e-df5407ed9d7c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.110972 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2afb13bf-8898-4ec7-b9f1-036467eec7fd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2afb13bf-8898-4ec7-b9f1-036467eec7fd" (UID: "2afb13bf-8898-4ec7-b9f1-036467eec7fd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.130179 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2afb13bf-8898-4ec7-b9f1-036467eec7fd-kube-api-access-pl2mw" (OuterVolumeSpecName: "kube-api-access-pl2mw") pod "2afb13bf-8898-4ec7-b9f1-036467eec7fd" (UID: "2afb13bf-8898-4ec7-b9f1-036467eec7fd"). InnerVolumeSpecName "kube-api-access-pl2mw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.131553 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-kube-api-access-j7zbh" (OuterVolumeSpecName: "kube-api-access-j7zbh") pod "cd799892-ab5d-43c3-aa1e-df5407ed9d7c" (UID: "cd799892-ab5d-43c3-aa1e-df5407ed9d7c"). InnerVolumeSpecName "kube-api-access-j7zbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.158292 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ecfbb2f-250c-4484-a20f-f45dce557abc" (UID: "6ecfbb2f-250c-4484-a20f-f45dce557abc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.169873 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-ee38-account-create-update-tg46v" event={"ID":"cd799892-ab5d-43c3-aa1e-df5407ed9d7c","Type":"ContainerDied","Data":"8ef95d2d1f5bd3efe9d62071ae76f28371a8850b5f79027f2d80364b446538cd"} Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.170207 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ef95d2d1f5bd3efe9d62071ae76f28371a8850b5f79027f2d80364b446538cd" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.170184 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-ee38-account-create-update-tg46v" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.172183 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6ecfbb2f-250c-4484-a20f-f45dce557abc" (UID: "6ecfbb2f-250c-4484-a20f-f45dce557abc"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.173658 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jdpl9" event={"ID":"6ecfbb2f-250c-4484-a20f-f45dce557abc","Type":"ContainerDied","Data":"4bfced606611adb84da47d5315efde8902a485e307cd314bd8b1c6e4b42835a0"} Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.173697 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4bfced606611adb84da47d5315efde8902a485e307cd314bd8b1c6e4b42835a0" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.173785 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jdpl9" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.175864 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-mbl5g" event={"ID":"2afb13bf-8898-4ec7-b9f1-036467eec7fd","Type":"ContainerDied","Data":"1cce59c6cdb53e0629954f993e1e360fdc0781755b9761a50dae18ec428b1fa9"} Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.175882 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cce59c6cdb53e0629954f993e1e360fdc0781755b9761a50dae18ec428b1fa9" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.175956 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-mbl5g" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.186254 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ecfbb2f-250c-4484-a20f-f45dce557abc-kube-api-access-dzxjw" (OuterVolumeSpecName: "kube-api-access-dzxjw") pod "6ecfbb2f-250c-4484-a20f-f45dce557abc" (UID: "6ecfbb2f-250c-4484-a20f-f45dce557abc"). InnerVolumeSpecName "kube-api-access-dzxjw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.215846 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pl2mw\" (UniqueName: \"kubernetes.io/projected/2afb13bf-8898-4ec7-b9f1-036467eec7fd-kube-api-access-pl2mw\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.216108 4995 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.216304 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.216402 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7zbh\" (UniqueName: \"kubernetes.io/projected/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-kube-api-access-j7zbh\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.216475 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzxjw\" (UniqueName: \"kubernetes.io/projected/6ecfbb2f-250c-4484-a20f-f45dce557abc-kube-api-access-dzxjw\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.216635 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cd799892-ab5d-43c3-aa1e-df5407ed9d7c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.216849 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2afb13bf-8898-4ec7-b9f1-036467eec7fd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.226357 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-config-data" (OuterVolumeSpecName: "config-data") pod "6ecfbb2f-250c-4484-a20f-f45dce557abc" (UID: "6ecfbb2f-250c-4484-a20f-f45dce557abc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:36 crc kubenswrapper[4995]: I0120 16:49:36.318784 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ecfbb2f-250c-4484-a20f-f45dce557abc-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.187135 4995 generic.go:334] "Generic (PLEG): container finished" podID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerID="eea83ddf100f67421593176e2e56fe13438674fd192650c7c0c5a223a509466e" exitCode=0 Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.187173 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerDied","Data":"eea83ddf100f67421593176e2e56fe13438674fd192650c7c0c5a223a509466e"} Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.420174 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-nxd45"] Jan 20 16:49:37 crc kubenswrapper[4995]: E0120 16:49:37.422947 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2afb13bf-8898-4ec7-b9f1-036467eec7fd" containerName="mariadb-database-create" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.422976 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2afb13bf-8898-4ec7-b9f1-036467eec7fd" containerName="mariadb-database-create" Jan 20 16:49:37 crc kubenswrapper[4995]: E0120 16:49:37.423041 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd799892-ab5d-43c3-aa1e-df5407ed9d7c" containerName="mariadb-account-create-update" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.423049 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd799892-ab5d-43c3-aa1e-df5407ed9d7c" containerName="mariadb-account-create-update" Jan 20 16:49:37 crc kubenswrapper[4995]: E0120 16:49:37.423060 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ecfbb2f-250c-4484-a20f-f45dce557abc" containerName="glance-db-sync" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.423066 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ecfbb2f-250c-4484-a20f-f45dce557abc" containerName="glance-db-sync" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.423574 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ecfbb2f-250c-4484-a20f-f45dce557abc" containerName="glance-db-sync" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.423587 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2afb13bf-8898-4ec7-b9f1-036467eec7fd" containerName="mariadb-database-create" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.423625 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd799892-ab5d-43c3-aa1e-df5407ed9d7c" containerName="mariadb-account-create-update" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.425122 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.439776 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-nxd45"] Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.541831 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.541912 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-config\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.541969 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw9cn\" (UniqueName: \"kubernetes.io/projected/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-kube-api-access-nw9cn\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.542119 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.542205 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.644048 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw9cn\" (UniqueName: \"kubernetes.io/projected/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-kube-api-access-nw9cn\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.644128 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.644160 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.644205 4995 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.644241 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-config\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.645109 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-config\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.645164 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-dns-svc\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.645448 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-nb\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.645930 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-sb\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.663351 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw9cn\" (UniqueName: \"kubernetes.io/projected/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-kube-api-access-nw9cn\") pod \"dnsmasq-dns-5b946c75cc-nxd45\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:37 crc kubenswrapper[4995]: I0120 16:49:37.764018 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:46 crc kubenswrapper[4995]: E0120 16:49:46.867592 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.51:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest" Jan 20 16:49:46 crc kubenswrapper[4995]: E0120 16:49:46.868405 4995 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.51:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest" Jan 20 16:49:46 crc kubenswrapper[4995]: E0120 16:49:46.868537 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-db-sync,Image:38.102.83.51:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/watcher/watcher.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zdfl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-db-sync-ncfj6_openstack(e5603781-3cf3-41db-bfc7-7dc74d244fd4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:49:46 crc kubenswrapper[4995]: E0120 16:49:46.869752 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-db-sync-ncfj6" podUID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.283856 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/root-account-create-update-f68pw"] Jan 20 16:49:47 crc kubenswrapper[4995]: W0120 16:49:47.288140 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa93e1a6_e043_4c7b_bda1_264af7bf6f53.slice/crio-7e6903e9f891985442d443c56199054ab5e9a42c34bf78162b8f82c1f56d368d WatchSource:0}: Error finding container 7e6903e9f891985442d443c56199054ab5e9a42c34bf78162b8f82c1f56d368d: Status 404 returned error can't find the container with id 7e6903e9f891985442d443c56199054ab5e9a42c34bf78162b8f82c1f56d368d Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.290464 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-nxd45"] Jan 20 16:49:47 crc kubenswrapper[4995]: W0120 16:49:47.291333 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod473df9a8_e4eb_44ed_a6c8_2e7c815063ba.slice/crio-e175fd1d331022bc8e2e9c7d7e7b16a43a11e85a636d0b5bbc71211acffe1de2 WatchSource:0}: Error finding container e175fd1d331022bc8e2e9c7d7e7b16a43a11e85a636d0b5bbc71211acffe1de2: Status 404 returned error can't find the container with id e175fd1d331022bc8e2e9c7d7e7b16a43a11e85a636d0b5bbc71211acffe1de2 Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.292012 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"212be13adc74edf83152d1e0dd5fb8ea5f1771ad1fcbbdb2a52963cb7140fc6e"} Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.292043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"21f292350d399017a4ce2fb95126f70bce780194b24df24080f12198e78ed564"} Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.294205 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerStarted","Data":"b6a446f2b1cb31d2aec998fcd6c1d6cfa56fd69253133b3c554dd530c0ed8bd1"} Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.296169 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j7gvz" event={"ID":"17f5be4a-fe5c-414f-b2af-3e06500135ba","Type":"ContainerStarted","Data":"c0596506311edc958891b00a4e638a168c9bd118c6a8a46b534b53a4617491b3"} Jan 20 16:49:47 crc kubenswrapper[4995]: E0120 16:49:47.297265 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.51:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest\\\"\"" pod="openstack/watcher-db-sync-ncfj6" podUID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" Jan 20 16:49:47 crc kubenswrapper[4995]: I0120 16:49:47.335148 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-j7gvz" podStartSLOduration=2.117774169 podStartE2EDuration="23.335131159s" podCreationTimestamp="2026-01-20 16:49:24 +0000 UTC" firstStartedPulling="2026-01-20 16:49:25.607146812 +0000 UTC m=+1083.851751618" lastFinishedPulling="2026-01-20 16:49:46.824503802 +0000 UTC m=+1105.069108608" observedRunningTime="2026-01-20 16:49:47.328043037 +0000 UTC m=+1105.572647853" watchObservedRunningTime="2026-01-20 16:49:47.335131159 +0000 UTC m=+1105.579735975" Jan 20 
16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.308029 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"97ec1169ab850a861cdcdc709f5dedf522c9c35896ef17c6c9e8bf8c8f06ae35"} Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.308378 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"059735aca65bb29c017b25fc415b781f99fd4c396bc553a00fefaee32c2910f8"} Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.310271 4995 generic.go:334] "Generic (PLEG): container finished" podID="473df9a8-e4eb-44ed-a6c8-2e7c815063ba" containerID="2a8cf128193a25a11de2bf8c2c472de613c727db9a2407030beaa669f491cede" exitCode=0 Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.310358 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-f68pw" event={"ID":"473df9a8-e4eb-44ed-a6c8-2e7c815063ba","Type":"ContainerDied","Data":"2a8cf128193a25a11de2bf8c2c472de613c727db9a2407030beaa669f491cede"} Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.310685 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-f68pw" event={"ID":"473df9a8-e4eb-44ed-a6c8-2e7c815063ba","Type":"ContainerStarted","Data":"e175fd1d331022bc8e2e9c7d7e7b16a43a11e85a636d0b5bbc71211acffe1de2"} Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.312669 4995 generic.go:334] "Generic (PLEG): container finished" podID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerID="330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100" exitCode=0 Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.312697 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" event={"ID":"fa93e1a6-e043-4c7b-bda1-264af7bf6f53","Type":"ContainerDied","Data":"330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100"} Jan 20 16:49:48 crc kubenswrapper[4995]: I0120 16:49:48.312723 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" event={"ID":"fa93e1a6-e043-4c7b-bda1-264af7bf6f53","Type":"ContainerStarted","Data":"7e6903e9f891985442d443c56199054ab5e9a42c34bf78162b8f82c1f56d368d"} Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.324437 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" event={"ID":"fa93e1a6-e043-4c7b-bda1-264af7bf6f53","Type":"ContainerStarted","Data":"fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d"} Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.324793 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.333208 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"40de6785f8bdaab0c7a6f5d9c6281f0b735803621c6936dd4fe54b5b25ac7fef"} Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.367996 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" podStartSLOduration=12.367975855 podStartE2EDuration="12.367975855s" podCreationTimestamp="2026-01-20 16:49:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:49.362451925 +0000 UTC m=+1107.607056771" watchObservedRunningTime="2026-01-20 16:49:49.367975855 +0000 UTC m=+1107.612580671" Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.895773 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.978703 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wwb9\" (UniqueName: \"kubernetes.io/projected/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-kube-api-access-2wwb9\") pod \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.978964 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-operator-scripts\") pod \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\" (UID: \"473df9a8-e4eb-44ed-a6c8-2e7c815063ba\") " Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.979599 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "473df9a8-e4eb-44ed-a6c8-2e7c815063ba" (UID: "473df9a8-e4eb-44ed-a6c8-2e7c815063ba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:49 crc kubenswrapper[4995]: I0120 16:49:49.986704 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-kube-api-access-2wwb9" (OuterVolumeSpecName: "kube-api-access-2wwb9") pod "473df9a8-e4eb-44ed-a6c8-2e7c815063ba" (UID: "473df9a8-e4eb-44ed-a6c8-2e7c815063ba"). InnerVolumeSpecName "kube-api-access-2wwb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:50 crc kubenswrapper[4995]: I0120 16:49:50.081492 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:50 crc kubenswrapper[4995]: I0120 16:49:50.081553 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wwb9\" (UniqueName: \"kubernetes.io/projected/473df9a8-e4eb-44ed-a6c8-2e7c815063ba-kube-api-access-2wwb9\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:50 crc kubenswrapper[4995]: I0120 16:49:50.348022 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-f68pw" event={"ID":"473df9a8-e4eb-44ed-a6c8-2e7c815063ba","Type":"ContainerDied","Data":"e175fd1d331022bc8e2e9c7d7e7b16a43a11e85a636d0b5bbc71211acffe1de2"} Jan 20 16:49:50 crc kubenswrapper[4995]: I0120 16:49:50.348049 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-f68pw" Jan 20 16:49:50 crc kubenswrapper[4995]: I0120 16:49:50.348063 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e175fd1d331022bc8e2e9c7d7e7b16a43a11e85a636d0b5bbc71211acffe1de2" Jan 20 16:49:50 crc kubenswrapper[4995]: I0120 16:49:50.357922 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"53497b851a32e99fe20098fe8107fc286b4d919084bb7ab8fd301be06e71e12c"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.365588 4995 generic.go:334] "Generic (PLEG): container finished" podID="17f5be4a-fe5c-414f-b2af-3e06500135ba" containerID="c0596506311edc958891b00a4e638a168c9bd118c6a8a46b534b53a4617491b3" exitCode=0 Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.365961 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j7gvz" event={"ID":"17f5be4a-fe5c-414f-b2af-3e06500135ba","Type":"ContainerDied","Data":"c0596506311edc958891b00a4e638a168c9bd118c6a8a46b534b53a4617491b3"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.373183 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"3f6eaaa487ac195db914fce59c3ecda539b5232c0135e9d863d3cd078bd323b9"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.373341 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"2ef704332b5fe18d5165e2762e5fbcb94ff314dae90f4f0eef6a5c6a0c530406"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.373426 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"dc337e5a24fc37208c5d19b8c143d67341b0fba2b9f72a703fb72504c22a9509"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.373501 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"d1de0e3f4a0070b2e4a4ea74366307c1bd0069db7772803d21b4306021de0492"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.373574 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"3f11d1ef-8720-4a15-91b7-2ad1602194f7","Type":"ContainerStarted","Data":"a346475531364b533833ab42bdbbe3c376cf147911f6a230601cd1a0e9289a30"} Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.430524 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.914540275 podStartE2EDuration="1m3.430506394s" podCreationTimestamp="2026-01-20 16:48:48 +0000 UTC" firstStartedPulling="2026-01-20 16:49:22.342519409 +0000 UTC m=+1080.587124205" lastFinishedPulling="2026-01-20 16:49:48.858485518 +0000 UTC m=+1107.103090324" observedRunningTime="2026-01-20 16:49:51.417528203 +0000 UTC m=+1109.662133049" watchObservedRunningTime="2026-01-20 16:49:51.430506394 +0000 UTC m=+1109.675111200" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.697738 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-nxd45"] Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.698304 4995 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerName="dnsmasq-dns" containerID="cri-o://fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d" gracePeriod=10 Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.737520 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-qms8s"] Jan 20 16:49:51 crc kubenswrapper[4995]: E0120 16:49:51.737958 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="473df9a8-e4eb-44ed-a6c8-2e7c815063ba" containerName="mariadb-account-create-update" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.737978 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="473df9a8-e4eb-44ed-a6c8-2e7c815063ba" containerName="mariadb-account-create-update" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.738207 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="473df9a8-e4eb-44ed-a6c8-2e7c815063ba" containerName="mariadb-account-create-update" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.739434 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.741546 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.750140 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-qms8s"] Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.810748 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-config\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.810854 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.810929 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45qzd\" (UniqueName: \"kubernetes.io/projected/a037abd1-08fa-45f1-a279-cd961046266d-kube-api-access-45qzd\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.810991 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.811169 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " 
pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.811234 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.915344 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-config\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.915389 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.915419 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45qzd\" (UniqueName: \"kubernetes.io/projected/a037abd1-08fa-45f1-a279-cd961046266d-kube-api-access-45qzd\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.915449 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.915532 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.915560 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.917054 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.917372 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 
20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.917394 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.917394 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-config\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.917950 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:51 crc kubenswrapper[4995]: I0120 16:49:51.949610 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45qzd\" (UniqueName: \"kubernetes.io/projected/a037abd1-08fa-45f1-a279-cd961046266d-kube-api-access-45qzd\") pod \"dnsmasq-dns-74f6bcbc87-qms8s\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.152814 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.181455 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.327674 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-sb\") pod \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.327815 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-dns-svc\") pod \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.327912 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-nb\") pod \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.327935 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-config\") pod \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.327969 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw9cn\" (UniqueName: \"kubernetes.io/projected/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-kube-api-access-nw9cn\") pod \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\" (UID: \"fa93e1a6-e043-4c7b-bda1-264af7bf6f53\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.360400 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-kube-api-access-nw9cn" (OuterVolumeSpecName: "kube-api-access-nw9cn") pod "fa93e1a6-e043-4c7b-bda1-264af7bf6f53" (UID: "fa93e1a6-e043-4c7b-bda1-264af7bf6f53"). InnerVolumeSpecName "kube-api-access-nw9cn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.407459 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "fa93e1a6-e043-4c7b-bda1-264af7bf6f53" (UID: "fa93e1a6-e043-4c7b-bda1-264af7bf6f53"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.412712 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "fa93e1a6-e043-4c7b-bda1-264af7bf6f53" (UID: "fa93e1a6-e043-4c7b-bda1-264af7bf6f53"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.413765 4995 generic.go:334] "Generic (PLEG): container finished" podID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerID="fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d" exitCode=0 Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.413817 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" event={"ID":"fa93e1a6-e043-4c7b-bda1-264af7bf6f53","Type":"ContainerDied","Data":"fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d"} Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.413855 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.413874 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b946c75cc-nxd45" event={"ID":"fa93e1a6-e043-4c7b-bda1-264af7bf6f53","Type":"ContainerDied","Data":"7e6903e9f891985442d443c56199054ab5e9a42c34bf78162b8f82c1f56d368d"} Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.413892 4995 scope.go:117] "RemoveContainer" containerID="fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.430150 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw9cn\" (UniqueName: \"kubernetes.io/projected/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-kube-api-access-nw9cn\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.430191 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.430199 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.440956 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "fa93e1a6-e043-4c7b-bda1-264af7bf6f53" (UID: "fa93e1a6-e043-4c7b-bda1-264af7bf6f53"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.442899 4995 scope.go:117] "RemoveContainer" containerID="330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.473813 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-config" (OuterVolumeSpecName: "config") pod "fa93e1a6-e043-4c7b-bda1-264af7bf6f53" (UID: "fa93e1a6-e043-4c7b-bda1-264af7bf6f53"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.488966 4995 scope.go:117] "RemoveContainer" containerID="fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d" Jan 20 16:49:52 crc kubenswrapper[4995]: E0120 16:49:52.489448 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d\": container with ID starting with fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d not found: ID does not exist" containerID="fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.489490 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d"} err="failed to get container status \"fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d\": rpc error: code = NotFound desc = could not find container \"fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d\": container with ID starting with fcc4ce952bfc8b59d5cf1578f3582c200d04af95cfb0743003452abbe6674e8d not found: ID does not exist" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.489510 4995 scope.go:117] "RemoveContainer" containerID="330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100" Jan 20 16:49:52 crc kubenswrapper[4995]: E0120 16:49:52.489875 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100\": container with ID starting with 330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100 not found: ID does not exist" containerID="330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.489896 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100"} err="failed to get container status \"330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100\": rpc error: code = NotFound desc = could not find container \"330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100\": container with ID starting with 330913b7113277e11cf856718b8a2ff5eba90d20ef0116cfca721f75e3595100 not found: ID does not exist" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.532683 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.532729 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fa93e1a6-e043-4c7b-bda1-264af7bf6f53-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.754097 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-nxd45"] Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.761387 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b946c75cc-nxd45"] Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.762527 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.808110 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-qms8s"] Jan 20 16:49:52 crc kubenswrapper[4995]: W0120 16:49:52.816949 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda037abd1_08fa_45f1_a279_cd961046266d.slice/crio-83ed512829faf3db1e16c4bd6a07f306acdf46297641bc952189379ac41ed055 WatchSource:0}: Error finding container 83ed512829faf3db1e16c4bd6a07f306acdf46297641bc952189379ac41ed055: Status 404 returned error can't find the container with id 83ed512829faf3db1e16c4bd6a07f306acdf46297641bc952189379ac41ed055 Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.841406 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-combined-ca-bundle\") pod \"17f5be4a-fe5c-414f-b2af-3e06500135ba\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.841548 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sq5x\" (UniqueName: \"kubernetes.io/projected/17f5be4a-fe5c-414f-b2af-3e06500135ba-kube-api-access-5sq5x\") pod \"17f5be4a-fe5c-414f-b2af-3e06500135ba\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.841776 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-config-data\") pod \"17f5be4a-fe5c-414f-b2af-3e06500135ba\" (UID: \"17f5be4a-fe5c-414f-b2af-3e06500135ba\") " Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.844048 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17f5be4a-fe5c-414f-b2af-3e06500135ba-kube-api-access-5sq5x" (OuterVolumeSpecName: "kube-api-access-5sq5x") pod "17f5be4a-fe5c-414f-b2af-3e06500135ba" (UID: "17f5be4a-fe5c-414f-b2af-3e06500135ba"). InnerVolumeSpecName "kube-api-access-5sq5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.867918 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17f5be4a-fe5c-414f-b2af-3e06500135ba" (UID: "17f5be4a-fe5c-414f-b2af-3e06500135ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.892282 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-config-data" (OuterVolumeSpecName: "config-data") pod "17f5be4a-fe5c-414f-b2af-3e06500135ba" (UID: "17f5be4a-fe5c-414f-b2af-3e06500135ba"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.943737 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.943848 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17f5be4a-fe5c-414f-b2af-3e06500135ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:52 crc kubenswrapper[4995]: I0120 16:49:52.943904 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sq5x\" (UniqueName: \"kubernetes.io/projected/17f5be4a-fe5c-414f-b2af-3e06500135ba-kube-api-access-5sq5x\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.426691 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-j7gvz" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.426697 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-j7gvz" event={"ID":"17f5be4a-fe5c-414f-b2af-3e06500135ba","Type":"ContainerDied","Data":"373dc034649074fc0f2479f8a3c01e0138a9af436755de9b089f0ccc5e5fa823"} Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.428119 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="373dc034649074fc0f2479f8a3c01e0138a9af436755de9b089f0ccc5e5fa823" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.430972 4995 generic.go:334] "Generic (PLEG): container finished" podID="a037abd1-08fa-45f1-a279-cd961046266d" containerID="df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e" exitCode=0 Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.431035 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" event={"ID":"a037abd1-08fa-45f1-a279-cd961046266d","Type":"ContainerDied","Data":"df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e"} Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.431107 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" event={"ID":"a037abd1-08fa-45f1-a279-cd961046266d","Type":"ContainerStarted","Data":"83ed512829faf3db1e16c4bd6a07f306acdf46297641bc952189379ac41ed055"} Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.695646 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-qms8s"] Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.704872 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-nsnv8"] Jan 20 16:49:53 crc kubenswrapper[4995]: E0120 16:49:53.705369 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerName="init" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.705390 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerName="init" Jan 20 16:49:53 crc kubenswrapper[4995]: E0120 16:49:53.705405 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17f5be4a-fe5c-414f-b2af-3e06500135ba" containerName="keystone-db-sync" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.705411 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="17f5be4a-fe5c-414f-b2af-3e06500135ba" containerName="keystone-db-sync" 
Jan 20 16:49:53 crc kubenswrapper[4995]: E0120 16:49:53.705421 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerName="dnsmasq-dns" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.705427 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerName="dnsmasq-dns" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.705580 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="17f5be4a-fe5c-414f-b2af-3e06500135ba" containerName="keystone-db-sync" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.705598 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" containerName="dnsmasq-dns" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.706179 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.716745 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-m8prt" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.716967 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.717794 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.718044 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.722108 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-nsnv8"] Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.733259 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-gfbpk"] Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.734382 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.745729 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.763471 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-gfbpk"] Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879159 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879519 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn92v\" (UniqueName: \"kubernetes.io/projected/12094649-404d-4452-a514-256d83ae33e1-kube-api-access-kn92v\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879552 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879597 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-combined-ca-bundle\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879630 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-fernet-keys\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879662 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2qpn\" (UniqueName: \"kubernetes.io/projected/60daeddb-7af3-4e92-89e6-5a302af596a0-kube-api-access-b2qpn\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879682 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-scripts\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879719 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-credential-keys\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879747 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-svc\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879770 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-config-data\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879795 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.879819 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-config\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.894940 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7dc8cbd4f9-qkmt7"] Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.896282 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.900603 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.900696 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.900805 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.901111 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-49vkb" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.922281 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7dc8cbd4f9-qkmt7"] Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.980992 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn92v\" (UniqueName: \"kubernetes.io/projected/12094649-404d-4452-a514-256d83ae33e1-kube-api-access-kn92v\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981041 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981095 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cp7lj\" (UniqueName: \"kubernetes.io/projected/a79e57d7-d6cb-43fa-b790-822e3e532591-kube-api-access-cp7lj\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981119 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-combined-ca-bundle\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981145 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-fernet-keys\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981167 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2qpn\" (UniqueName: \"kubernetes.io/projected/60daeddb-7af3-4e92-89e6-5a302af596a0-kube-api-access-b2qpn\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981183 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-scripts\") pod \"keystone-bootstrap-nsnv8\" (UID: 
\"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981201 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a79e57d7-d6cb-43fa-b790-822e3e532591-logs\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981223 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-config-data\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981238 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-scripts\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981258 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-credential-keys\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981280 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-svc\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981298 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-config-data\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981319 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981335 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-config\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.981370 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a79e57d7-d6cb-43fa-b790-822e3e532591-horizon-secret-key\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:53 
crc kubenswrapper[4995]: I0120 16:49:53.981390 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.982278 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:53 crc kubenswrapper[4995]: I0120 16:49:53.983045 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:53.997192 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:53.997892 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-config\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:53.998662 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-svc\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.017642 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-scripts\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.043496 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa93e1a6-e043-4c7b-bda1-264af7bf6f53" path="/var/lib/kubelet/pods/fa93e1a6-e043-4c7b-bda1-264af7bf6f53/volumes" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.044195 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-hmlm7"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.052858 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.055826 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn92v\" (UniqueName: \"kubernetes.io/projected/12094649-404d-4452-a514-256d83ae33e1-kube-api-access-kn92v\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.056188 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sh7s5" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.056569 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.058084 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2qpn\" (UniqueName: \"kubernetes.io/projected/60daeddb-7af3-4e92-89e6-5a302af596a0-kube-api-access-b2qpn\") pod \"dnsmasq-dns-847c4cc679-gfbpk\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.059197 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-fernet-keys\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.060564 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-credential-keys\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.066411 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.073048 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-combined-ca-bundle\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.074876 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-hmlm7"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.076416 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-config-data\") pod \"keystone-bootstrap-nsnv8\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.084036 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a79e57d7-d6cb-43fa-b790-822e3e532591-logs\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.084614 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/a79e57d7-d6cb-43fa-b790-822e3e532591-logs\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.084683 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-config-data\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.084731 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-scripts\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.084841 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a79e57d7-d6cb-43fa-b790-822e3e532591-horizon-secret-key\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.084962 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cp7lj\" (UniqueName: \"kubernetes.io/projected/a79e57d7-d6cb-43fa-b790-822e3e532591-kube-api-access-cp7lj\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.088434 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.097851 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-config-data\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.105268 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-cc4hb"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.106334 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.112493 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a79e57d7-d6cb-43fa-b790-822e3e532591-horizon-secret-key\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.113156 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-scripts\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.114537 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-cc4hb"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.117263 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.117795 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-zb7rr" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.145902 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.147750 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.163339 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.163661 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5lbqx" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.163822 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.163959 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.184224 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cp7lj\" (UniqueName: \"kubernetes.io/projected/a79e57d7-d6cb-43fa-b790-822e3e532591-kube-api-access-cp7lj\") pod \"horizon-7dc8cbd4f9-qkmt7\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.186495 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-config-data\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.186552 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-scripts\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.186577 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-558j4\" (UniqueName: \"kubernetes.io/projected/53794c82-829c-4b77-b902-01be2130f0b8-kube-api-access-558j4\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.186652 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-db-sync-config-data\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.186694 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53794c82-829c-4b77-b902-01be2130f0b8-etc-machine-id\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.186799 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-combined-ca-bundle\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.201143 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-4zjlv"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.202488 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.223439 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.223600 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.223778 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8sfqz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.236521 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.285155 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287738 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-config-data\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287778 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-db-sync-config-data\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287798 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287818 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fl6p\" (UniqueName: \"kubernetes.io/projected/40362747-51a2-473e-845c-3427003b9b7a-kube-api-access-6fl6p\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287845 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53794c82-829c-4b77-b902-01be2130f0b8-etc-machine-id\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287864 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjdrq\" (UniqueName: \"kubernetes.io/projected/6cb81edf-880e-421a-bc37-258db15b1ad9-kube-api-access-pjdrq\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287882 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-db-sync-config-data\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287904 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qjwh\" (UniqueName: \"kubernetes.io/projected/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-kube-api-access-9qjwh\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287925 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-scripts\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287959 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40362747-51a2-473e-845c-3427003b9b7a-logs\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.287988 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288011 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288027 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-combined-ca-bundle\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288056 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-combined-ca-bundle\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288091 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-combined-ca-bundle\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288122 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288150 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-scripts\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288176 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-config-data\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288200 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-scripts\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288218 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-558j4\" (UniqueName: \"kubernetes.io/projected/53794c82-829c-4b77-b902-01be2130f0b8-kube-api-access-558j4\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288246 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-logs\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.288267 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-config-data\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.289287 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53794c82-829c-4b77-b902-01be2130f0b8-etc-machine-id\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.306525 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-config-data\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.316220 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-gfbpk"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.349151 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4zjlv"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.352773 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.381432 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-558j4\" (UniqueName: \"kubernetes.io/projected/53794c82-829c-4b77-b902-01be2130f0b8-kube-api-access-558j4\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.389290 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-mzctz"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.390382 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-mzctz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.392620 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-db-sync-config-data\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403128 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403160 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fl6p\" (UniqueName: \"kubernetes.io/projected/40362747-51a2-473e-845c-3427003b9b7a-kube-api-access-6fl6p\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403201 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjdrq\" (UniqueName: \"kubernetes.io/projected/6cb81edf-880e-421a-bc37-258db15b1ad9-kube-api-access-pjdrq\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403223 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-db-sync-config-data\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403246 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qjwh\" (UniqueName: \"kubernetes.io/projected/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-kube-api-access-9qjwh\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403273 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-scripts\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403325 4995 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40362747-51a2-473e-845c-3427003b9b7a-logs\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403373 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403388 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403430 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-combined-ca-bundle\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403445 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-combined-ca-bundle\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403472 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403492 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-scripts\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403513 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-scripts\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403537 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-logs\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403550 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-config-data\") pod 
\"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403577 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-config-data\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.403896 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40362747-51a2-473e-845c-3427003b9b7a-logs\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.406818 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-26dnj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.407048 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.398064 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-combined-ca-bundle\") pod \"cinder-db-sync-hmlm7\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") " pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.407943 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.408247 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.413240 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-logs\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.415472 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.421410 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-hmlm7" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.421975 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-scripts\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.427674 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-db-sync-config-data\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.428664 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-scripts\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.431171 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-combined-ca-bundle\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.435473 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.436835 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.446017 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-config-data\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.465806 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-config-data\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.505812 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fl6p\" (UniqueName: \"kubernetes.io/projected/40362747-51a2-473e-845c-3427003b9b7a-kube-api-access-6fl6p\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.505952 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-9qjwh\" (UniqueName: \"kubernetes.io/projected/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-kube-api-access-9qjwh\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.507237 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjdrq\" (UniqueName: \"kubernetes.io/projected/6cb81edf-880e-421a-bc37-258db15b1ad9-kube-api-access-pjdrq\") pod \"barbican-db-sync-cc4hb\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") " pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.538598 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-combined-ca-bundle\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.538942 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hq82f\" (UniqueName: \"kubernetes.io/projected/c8a61b44-4464-497c-881e-bdc0d9063bd9-kube-api-access-hq82f\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.539021 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-config\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.545186 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerStarted","Data":"dd48c16250e4e399de598759fcf60283ca4ba1c2d9f854a1f5c0207e0bd864c7"} Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.545397 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerStarted","Data":"08c4e805f0ea901d7a17afed9a299e1055337412cd83d4f2937427c65194932b"} Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.553707 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-combined-ca-bundle\") pod \"placement-db-sync-4zjlv\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.609590 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" event={"ID":"a037abd1-08fa-45f1-a279-cd961046266d","Type":"ContainerStarted","Data":"7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e"} Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.609871 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" podUID="a037abd1-08fa-45f1-a279-cd961046266d" containerName="dnsmasq-dns" containerID="cri-o://7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e" gracePeriod=10 Jan 20 16:49:54 crc kubenswrapper[4995]: 
I0120 16:49:54.617385 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " pod="openstack/glance-default-external-api-0"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.617659 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.662544 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-combined-ca-bundle\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.662667 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hq82f\" (UniqueName: \"kubernetes.io/projected/c8a61b44-4464-497c-881e-bdc0d9063bd9-kube-api-access-hq82f\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.662700 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-config\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.670012 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-config\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.670027 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-combined-ca-bundle\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.670387 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cc4hb"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.675296 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-mzctz"]
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.696828 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.710010 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6db4b5b7df-cv7h4"]
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.712235 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.713939 4995 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.718118 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-fcjgj"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.719797 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.724022 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hq82f\" (UniqueName: \"kubernetes.io/projected/c8a61b44-4464-497c-881e-bdc0d9063bd9-kube-api-access-hq82f\") pod \"neutron-db-sync-mzctz\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " pod="openstack/neutron-db-sync-mzctz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.736498 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.736678 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.744150 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.744808 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.766364 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-fcjgj"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.774879 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6db4b5b7df-cv7h4"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.801340 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.805507 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.807468 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-4zjlv" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.813265 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.813480 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.819939 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.835223 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=28.835205154 podStartE2EDuration="28.835205154s" podCreationTimestamp="2026-01-20 16:49:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:54.707834422 +0000 UTC m=+1112.952439228" watchObservedRunningTime="2026-01-20 16:49:54.835205154 +0000 UTC m=+1113.079809960" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.849778 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" podStartSLOduration=3.849760318 podStartE2EDuration="3.849760318s" podCreationTimestamp="2026-01-20 16:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:54.829020295 +0000 UTC m=+1113.073625101" watchObservedRunningTime="2026-01-20 16:49:54.849760318 +0000 UTC m=+1113.094365124" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868453 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4v4x\" (UniqueName: \"kubernetes.io/projected/f12a8420-f199-4df8-8e65-66fe1a2d9fce-kube-api-access-g4v4x\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868499 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868522 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-config\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868544 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-run-httpd\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868562 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-config-data\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868862 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-log-httpd\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868892 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868916 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm2d9\" (UniqueName: \"kubernetes.io/projected/701cf418-d6f5-4326-b237-2fd120de4bd3-kube-api-access-wm2d9\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868943 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jr2b\" (UniqueName: \"kubernetes.io/projected/1e646811-19e9-4a68-a419-6d0db9feb93e-kube-api-access-6jr2b\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.868982 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-scripts\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869022 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869044 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869149 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-scripts\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869176 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869240 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/701cf418-d6f5-4326-b237-2fd120de4bd3-horizon-secret-key\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869259 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/701cf418-d6f5-4326-b237-2fd120de4bd3-logs\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.869327 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-config-data\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.878053 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-mzctz" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.970877 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-log-httpd\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.970926 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.970963 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm2d9\" (UniqueName: \"kubernetes.io/projected/701cf418-d6f5-4326-b237-2fd120de4bd3-kube-api-access-wm2d9\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.970993 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-logs\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971021 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jr2b\" (UniqueName: \"kubernetes.io/projected/1e646811-19e9-4a68-a419-6d0db9feb93e-kube-api-access-6jr2b\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971066 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-scripts\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971117 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971141 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971172 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-scripts\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971203 4995 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971242 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971269 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971297 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971321 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971351 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/701cf418-d6f5-4326-b237-2fd120de4bd3-horizon-secret-key\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971377 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/701cf418-d6f5-4326-b237-2fd120de4bd3-logs\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971404 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971440 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4dgv\" (UniqueName: \"kubernetes.io/projected/e905a434-9ee1-4a03-a13d-22cb5b0bd609-kube-api-access-p4dgv\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971473 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-config-data\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971503 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971520 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4v4x\" (UniqueName: \"kubernetes.io/projected/f12a8420-f199-4df8-8e65-66fe1a2d9fce-kube-api-access-g4v4x\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971560 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971582 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971599 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-config\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971618 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-run-httpd\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.971637 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-config-data\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.976647 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-log-httpd\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.977308 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " 
pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.977538 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-config-data\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.983294 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/701cf418-d6f5-4326-b237-2fd120de4bd3-logs\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.983475 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/701cf418-d6f5-4326-b237-2fd120de4bd3-horizon-secret-key\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.984965 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-config\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.985679 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.988297 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-config-data\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.989159 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-run-httpd\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.989406 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.989547 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.989741 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-scripts\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.990384 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:54 crc kubenswrapper[4995]: I0120 16:49:54.990650 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-scripts\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.013298 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.020383 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7dc8cbd4f9-qkmt7"] Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.041770 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jr2b\" (UniqueName: \"kubernetes.io/projected/1e646811-19e9-4a68-a419-6d0db9feb93e-kube-api-access-6jr2b\") pod \"ceilometer-0\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") " pod="openstack/ceilometer-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.043605 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4v4x\" (UniqueName: \"kubernetes.io/projected/f12a8420-f199-4df8-8e65-66fe1a2d9fce-kube-api-access-g4v4x\") pod \"dnsmasq-dns-785d8bcb8c-fcjgj\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.044643 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm2d9\" (UniqueName: \"kubernetes.io/projected/701cf418-d6f5-4326-b237-2fd120de4bd3-kube-api-access-wm2d9\") pod \"horizon-6db4b5b7df-cv7h4\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074278 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074323 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074380 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod 
\"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074411 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4dgv\" (UniqueName: \"kubernetes.io/projected/e905a434-9ee1-4a03-a13d-22cb5b0bd609-kube-api-access-p4dgv\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074450 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074484 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074544 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-logs\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.074644 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.076166 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.078701 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.079109 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.079780 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-logs\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " 
pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.086608 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.105615 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.111461 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4dgv\" (UniqueName: \"kubernetes.io/projected/e905a434-9ee1-4a03-a13d-22cb5b0bd609-kube-api-access-p4dgv\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.116339 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.127917 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.140220 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.162209 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.169042 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.182152 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.258416 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-gfbpk"] Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.365584 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.487408 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-nb\") pod \"a037abd1-08fa-45f1-a279-cd961046266d\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.487494 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-swift-storage-0\") pod \"a037abd1-08fa-45f1-a279-cd961046266d\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.487532 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-sb\") pod \"a037abd1-08fa-45f1-a279-cd961046266d\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.487560 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45qzd\" (UniqueName: \"kubernetes.io/projected/a037abd1-08fa-45f1-a279-cd961046266d-kube-api-access-45qzd\") pod \"a037abd1-08fa-45f1-a279-cd961046266d\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.487728 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-config\") pod \"a037abd1-08fa-45f1-a279-cd961046266d\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.487775 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-svc\") pod \"a037abd1-08fa-45f1-a279-cd961046266d\" (UID: \"a037abd1-08fa-45f1-a279-cd961046266d\") " Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.509347 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a037abd1-08fa-45f1-a279-cd961046266d-kube-api-access-45qzd" (OuterVolumeSpecName: "kube-api-access-45qzd") pod "a037abd1-08fa-45f1-a279-cd961046266d" (UID: "a037abd1-08fa-45f1-a279-cd961046266d"). InnerVolumeSpecName "kube-api-access-45qzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.553202 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a037abd1-08fa-45f1-a279-cd961046266d" (UID: "a037abd1-08fa-45f1-a279-cd961046266d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.561207 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a037abd1-08fa-45f1-a279-cd961046266d" (UID: "a037abd1-08fa-45f1-a279-cd961046266d"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.568647 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a037abd1-08fa-45f1-a279-cd961046266d" (UID: "a037abd1-08fa-45f1-a279-cd961046266d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.586732 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a037abd1-08fa-45f1-a279-cd961046266d" (UID: "a037abd1-08fa-45f1-a279-cd961046266d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.590355 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.590375 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45qzd\" (UniqueName: \"kubernetes.io/projected/a037abd1-08fa-45f1-a279-cd961046266d-kube-api-access-45qzd\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.590388 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.590399 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.590411 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.614405 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-config" (OuterVolumeSpecName: "config") pod "a037abd1-08fa-45f1-a279-cd961046266d" (UID: "a037abd1-08fa-45f1-a279-cd961046266d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.624815 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8cbd4f9-qkmt7" event={"ID":"a79e57d7-d6cb-43fa-b790-822e3e532591","Type":"ContainerStarted","Data":"7efb64a188a9d22e7b98ef5e63ce0f4a77f49c4017b007ec5fdcf440b5c58941"} Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.631424 4995 generic.go:334] "Generic (PLEG): container finished" podID="a037abd1-08fa-45f1-a279-cd961046266d" containerID="7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e" exitCode=0 Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.631524 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.631527 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" event={"ID":"a037abd1-08fa-45f1-a279-cd961046266d","Type":"ContainerDied","Data":"7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e"} Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.631584 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-qms8s" event={"ID":"a037abd1-08fa-45f1-a279-cd961046266d","Type":"ContainerDied","Data":"83ed512829faf3db1e16c4bd6a07f306acdf46297641bc952189379ac41ed055"} Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.631601 4995 scope.go:117] "RemoveContainer" containerID="7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.636944 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" event={"ID":"60daeddb-7af3-4e92-89e6-5a302af596a0","Type":"ContainerStarted","Data":"dc486d3973385a5e54b441dd6c864dabbe44383b818afe9b3bd4688e3b444f0a"} Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.673312 4995 scope.go:117] "RemoveContainer" containerID="df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.691971 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a037abd1-08fa-45f1-a279-cd961046266d-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.695374 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-qms8s"] Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.705027 4995 scope.go:117] "RemoveContainer" containerID="7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e" Jan 20 16:49:55 crc kubenswrapper[4995]: E0120 16:49:55.706398 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e\": container with ID starting with 7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e not found: ID does not exist" containerID="7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.706436 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e"} err="failed to get container status \"7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e\": rpc error: code = NotFound desc = could not find container \"7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e\": container with ID starting with 7d74f25985b51eb9347882146fb14bff971e83fcd8907c5da2d5f8a1c466489e not found: ID does not exist" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.706457 4995 scope.go:117] "RemoveContainer" containerID="df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e" Jan 20 16:49:55 crc kubenswrapper[4995]: E0120 16:49:55.707925 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e\": container with ID starting with df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e 
not found: ID does not exist" containerID="df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.707952 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e"} err="failed to get container status \"df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e\": rpc error: code = NotFound desc = could not find container \"df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e\": container with ID starting with df3dc3f166e153e8ca7a8e368b1cc9f4a8c5c531383e644d1bef6f29069b918e not found: ID does not exist" Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.711426 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-qms8s"] Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.778167 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-hmlm7"] Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.802424 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-cc4hb"] Jan 20 16:49:55 crc kubenswrapper[4995]: I0120 16:49:55.817137 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-nsnv8"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.008442 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a037abd1-08fa-45f1-a279-cd961046266d" path="/var/lib/kubelet/pods/a037abd1-08fa-45f1-a279-cd961046266d/volumes" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.066993 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.315815 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.329143 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-mzctz"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.339887 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-4zjlv"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.379476 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6db4b5b7df-cv7h4"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.464819 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-fcjgj"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.477978 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.511201 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.595610 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7dc8cbd4f9-qkmt7"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.659496 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.665810 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-588ff59d7c-f4fvk"] Jan 20 16:49:56 crc kubenswrapper[4995]: E0120 16:49:56.666261 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a037abd1-08fa-45f1-a279-cd961046266d" containerName="init" Jan 20 16:49:56 crc 
kubenswrapper[4995]: I0120 16:49:56.666278 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a037abd1-08fa-45f1-a279-cd961046266d" containerName="init" Jan 20 16:49:56 crc kubenswrapper[4995]: E0120 16:49:56.666288 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a037abd1-08fa-45f1-a279-cd961046266d" containerName="dnsmasq-dns" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.666296 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a037abd1-08fa-45f1-a279-cd961046266d" containerName="dnsmasq-dns" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.666505 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a037abd1-08fa-45f1-a279-cd961046266d" containerName="dnsmasq-dns" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.667408 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.673405 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-588ff59d7c-f4fvk"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.700029 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.712875 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e905a434-9ee1-4a03-a13d-22cb5b0bd609","Type":"ContainerStarted","Data":"cff03420b6578f974657c20dd93f1974691b1bfd329729cde37685082bc49356"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.716578 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mzctz" event={"ID":"c8a61b44-4464-497c-881e-bdc0d9063bd9","Type":"ContainerStarted","Data":"f39979b8665f447ade03625d209c3acbc4ea8cb034ef7b8abe0bd8bd3050d2d9"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.720116 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6db4b5b7df-cv7h4" event={"ID":"701cf418-d6f5-4326-b237-2fd120de4bd3","Type":"ContainerStarted","Data":"c9f3e1f5fd4ca075bbc145faefb918f6a017aba7db6bc9014583e64d1efe4cff"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.734192 4995 generic.go:334] "Generic (PLEG): container finished" podID="60daeddb-7af3-4e92-89e6-5a302af596a0" containerID="9827ac5e51251ba87b0de14d29c65920b13d724e690e56deb06fdb5c0f9bd291" exitCode=0 Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.734265 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" event={"ID":"60daeddb-7af3-4e92-89e6-5a302af596a0","Type":"ContainerDied","Data":"9827ac5e51251ba87b0de14d29c65920b13d724e690e56deb06fdb5c0f9bd291"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.741606 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3afe443-c0e4-49f2-9245-29db8eeefba5-horizon-secret-key\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.741668 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg6jt\" (UniqueName: \"kubernetes.io/projected/d3afe443-c0e4-49f2-9245-29db8eeefba5-kube-api-access-lg6jt\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " 
pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.741977 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-scripts\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.742027 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.742105 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3afe443-c0e4-49f2-9245-29db8eeefba5-logs\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.745657 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" event={"ID":"f12a8420-f199-4df8-8e65-66fe1a2d9fce","Type":"ContainerStarted","Data":"7d5c84c72d48c2f60d942da043d8a24bfdaaed224c0ced1ce18559898170020b"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.762474 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cc4hb" event={"ID":"6cb81edf-880e-421a-bc37-258db15b1ad9","Type":"ContainerStarted","Data":"9b648be5c2d42cc9ce883c167ec3dd86c2cbe66b2173d9965233d68b19bb6690"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.774497 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4zjlv" event={"ID":"40362747-51a2-473e-845c-3427003b9b7a","Type":"ContainerStarted","Data":"c6bf705a603203aaad7336df23f3ec0e72f4a1e2371f2c8b89365df5f4fd0ee8"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.778621 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hmlm7" event={"ID":"53794c82-829c-4b77-b902-01be2130f0b8","Type":"ContainerStarted","Data":"4517099c2b1e4ce25f1f586f11fb0c280d019e739b6a8406792f9c94491d4697"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.832014 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-nsnv8" event={"ID":"12094649-404d-4452-a514-256d83ae33e1","Type":"ContainerStarted","Data":"eec45b2cea5651bc2a81854648aa2f0a8fd3c0b130b5982c1fee687be4f46b9f"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.832056 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-nsnv8" event={"ID":"12094649-404d-4452-a514-256d83ae33e1","Type":"ContainerStarted","Data":"9abef3cd62c5b7c8500fcf40dbe51ff6e58cf48e3ba3c4825c74faed7a932222"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.843219 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3afe443-c0e4-49f2-9245-29db8eeefba5-logs\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.843280 4995 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3afe443-c0e4-49f2-9245-29db8eeefba5-horizon-secret-key\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.843311 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg6jt\" (UniqueName: \"kubernetes.io/projected/d3afe443-c0e4-49f2-9245-29db8eeefba5-kube-api-access-lg6jt\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.843510 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-scripts\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.843542 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.843818 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3afe443-c0e4-49f2-9245-29db8eeefba5-logs\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.844596 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-scripts\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.845096 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.851125 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3afe443-c0e4-49f2-9245-29db8eeefba5-horizon-secret-key\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.866818 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6bff689a-b9d5-4a27-a93b-6c4105a6f88c","Type":"ContainerStarted","Data":"fdf7556d784843cda0bfe4f251e5cfd5687516b2a1dd929188e42228994ff79c"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.903367 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-nsnv8" podStartSLOduration=3.902663207 podStartE2EDuration="3.902663207s" podCreationTimestamp="2026-01-20 16:49:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:56.888663247 +0000 UTC m=+1115.133268053" watchObservedRunningTime="2026-01-20 16:49:56.902663207 +0000 UTC m=+1115.147268013" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.915571 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerStarted","Data":"1933e45f4ec3de87b94ea5b28f797c45168e555ccd6ff0d96212cd11ac314fe0"} Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.915637 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.916035 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:56 crc kubenswrapper[4995]: I0120 16:49:56.946098 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.116772 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg6jt\" (UniqueName: \"kubernetes.io/projected/d3afe443-c0e4-49f2-9245-29db8eeefba5-kube-api-access-lg6jt\") pod \"horizon-588ff59d7c-f4fvk\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.313926 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.333789 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.466568 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-config\") pod \"60daeddb-7af3-4e92-89e6-5a302af596a0\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.466718 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-nb\") pod \"60daeddb-7af3-4e92-89e6-5a302af596a0\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.466801 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-swift-storage-0\") pod \"60daeddb-7af3-4e92-89e6-5a302af596a0\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.466830 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2qpn\" (UniqueName: \"kubernetes.io/projected/60daeddb-7af3-4e92-89e6-5a302af596a0-kube-api-access-b2qpn\") pod \"60daeddb-7af3-4e92-89e6-5a302af596a0\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.466885 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-svc\") pod \"60daeddb-7af3-4e92-89e6-5a302af596a0\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " Jan 20 
16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.466915 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-sb\") pod \"60daeddb-7af3-4e92-89e6-5a302af596a0\" (UID: \"60daeddb-7af3-4e92-89e6-5a302af596a0\") " Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.486559 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60daeddb-7af3-4e92-89e6-5a302af596a0-kube-api-access-b2qpn" (OuterVolumeSpecName: "kube-api-access-b2qpn") pod "60daeddb-7af3-4e92-89e6-5a302af596a0" (UID: "60daeddb-7af3-4e92-89e6-5a302af596a0"). InnerVolumeSpecName "kube-api-access-b2qpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.526612 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "60daeddb-7af3-4e92-89e6-5a302af596a0" (UID: "60daeddb-7af3-4e92-89e6-5a302af596a0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.545805 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "60daeddb-7af3-4e92-89e6-5a302af596a0" (UID: "60daeddb-7af3-4e92-89e6-5a302af596a0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.545876 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-config" (OuterVolumeSpecName: "config") pod "60daeddb-7af3-4e92-89e6-5a302af596a0" (UID: "60daeddb-7af3-4e92-89e6-5a302af596a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.549243 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "60daeddb-7af3-4e92-89e6-5a302af596a0" (UID: "60daeddb-7af3-4e92-89e6-5a302af596a0"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.569191 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2qpn\" (UniqueName: \"kubernetes.io/projected/60daeddb-7af3-4e92-89e6-5a302af596a0-kube-api-access-b2qpn\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.569228 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.569237 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.569245 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.569253 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.589804 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "60daeddb-7af3-4e92-89e6-5a302af596a0" (UID: "60daeddb-7af3-4e92-89e6-5a302af596a0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.670350 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/60daeddb-7af3-4e92-89e6-5a302af596a0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.913285 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-588ff59d7c-f4fvk"] Jan 20 16:49:57 crc kubenswrapper[4995]: W0120 16:49:57.920046 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3afe443_c0e4_49f2_9245_29db8eeefba5.slice/crio-f1f1d961013141971b78badc753cf28255d0ea76ad41f05356f6b162c3588969 WatchSource:0}: Error finding container f1f1d961013141971b78badc753cf28255d0ea76ad41f05356f6b162c3588969: Status 404 returned error can't find the container with id f1f1d961013141971b78badc753cf28255d0ea76ad41f05356f6b162c3588969 Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.935471 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" event={"ID":"60daeddb-7af3-4e92-89e6-5a302af596a0","Type":"ContainerDied","Data":"dc486d3973385a5e54b441dd6c864dabbe44383b818afe9b3bd4688e3b444f0a"} Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.935509 4995 scope.go:117] "RemoveContainer" containerID="9827ac5e51251ba87b0de14d29c65920b13d724e690e56deb06fdb5c0f9bd291" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.935610 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-gfbpk" Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.946415 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6bff689a-b9d5-4a27-a93b-6c4105a6f88c","Type":"ContainerStarted","Data":"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38"} Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.950710 4995 generic.go:334] "Generic (PLEG): container finished" podID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerID="2b8dc37efe02950c324e009629b2907d0457b29306257e011700729b69d5ba67" exitCode=0 Jan 20 16:49:57 crc kubenswrapper[4995]: I0120 16:49:57.950814 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" event={"ID":"f12a8420-f199-4df8-8e65-66fe1a2d9fce","Type":"ContainerDied","Data":"2b8dc37efe02950c324e009629b2907d0457b29306257e011700729b69d5ba67"} Jan 20 16:49:58 crc kubenswrapper[4995]: I0120 16:49:58.064335 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mzctz" event={"ID":"c8a61b44-4464-497c-881e-bdc0d9063bd9","Type":"ContainerStarted","Data":"d2d4f341556f481ad7e5304853ff66e49b89bc2b9ed38f644d8d2401c9ed629c"} Jan 20 16:49:58 crc kubenswrapper[4995]: I0120 16:49:58.064393 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-gfbpk"] Jan 20 16:49:58 crc kubenswrapper[4995]: I0120 16:49:58.064412 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-gfbpk"] Jan 20 16:49:58 crc kubenswrapper[4995]: I0120 16:49:58.084731 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 20 16:49:58 crc kubenswrapper[4995]: I0120 16:49:58.088801 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-mzctz" podStartSLOduration=4.088787998 podStartE2EDuration="4.088787998s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:58.08481802 +0000 UTC m=+1116.329422846" watchObservedRunningTime="2026-01-20 16:49:58.088787998 +0000 UTC m=+1116.333392804" Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.090632 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" event={"ID":"f12a8420-f199-4df8-8e65-66fe1a2d9fce","Type":"ContainerStarted","Data":"b1bb280977cc756f6c7053ebf4ed7c9d3f5f773859d6744c11e27060c89d7b06"} Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.091327 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.097002 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e905a434-9ee1-4a03-a13d-22cb5b0bd609","Type":"ContainerStarted","Data":"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e"} Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.116278 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" podStartSLOduration=5.1162586 podStartE2EDuration="5.1162586s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-20 16:49:59.113815313 +0000 UTC m=+1117.358420129" watchObservedRunningTime="2026-01-20 16:49:59.1162586 +0000 UTC m=+1117.360863406" Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.122858 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-588ff59d7c-f4fvk" event={"ID":"d3afe443-c0e4-49f2-9245-29db8eeefba5","Type":"ContainerStarted","Data":"f1f1d961013141971b78badc753cf28255d0ea76ad41f05356f6b162c3588969"} Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.148459 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-log" containerID="cri-o://8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38" gracePeriod=30 Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.148956 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-httpd" containerID="cri-o://6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf" gracePeriod=30 Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.149202 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6bff689a-b9d5-4a27-a93b-6c4105a6f88c","Type":"ContainerStarted","Data":"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf"} Jan 20 16:49:59 crc kubenswrapper[4995]: I0120 16:49:59.183633 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.183606275 podStartE2EDuration="5.183606275s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:49:59.173911081 +0000 UTC m=+1117.418515887" watchObservedRunningTime="2026-01-20 16:49:59.183606275 +0000 UTC m=+1117.428211081" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.005987 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60daeddb-7af3-4e92-89e6-5a302af596a0" path="/var/lib/kubelet/pods/60daeddb-7af3-4e92-89e6-5a302af596a0/volumes" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.033851 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.127854 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-config-data\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.127942 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qjwh\" (UniqueName: \"kubernetes.io/projected/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-kube-api-access-9qjwh\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.127961 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-public-tls-certs\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.128023 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-scripts\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.128087 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-combined-ca-bundle\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.128170 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.128202 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-httpd-run\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.128227 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-logs\") pod \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\" (UID: \"6bff689a-b9d5-4a27-a93b-6c4105a6f88c\") " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.129012 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-logs" (OuterVolumeSpecName: "logs") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.132503 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.134140 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.134317 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-scripts" (OuterVolumeSpecName: "scripts") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.138306 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-kube-api-access-9qjwh" (OuterVolumeSpecName: "kube-api-access-9qjwh") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "kube-api-access-9qjwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.160459 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e905a434-9ee1-4a03-a13d-22cb5b0bd609","Type":"ContainerStarted","Data":"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c"} Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.160597 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-log" containerID="cri-o://026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e" gracePeriod=30 Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.161006 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-httpd" containerID="cri-o://5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c" gracePeriod=30 Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.161179 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.167432 4995 generic.go:334] "Generic (PLEG): container finished" podID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerID="6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf" exitCode=0 Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.167476 4995 generic.go:334] "Generic (PLEG): container finished" podID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerID="8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38" exitCode=143 Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.168476 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.168670 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6bff689a-b9d5-4a27-a93b-6c4105a6f88c","Type":"ContainerDied","Data":"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf"} Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.168709 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6bff689a-b9d5-4a27-a93b-6c4105a6f88c","Type":"ContainerDied","Data":"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38"} Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.168726 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6bff689a-b9d5-4a27-a93b-6c4105a6f88c","Type":"ContainerDied","Data":"fdf7556d784843cda0bfe4f251e5cfd5687516b2a1dd929188e42228994ff79c"} Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.168744 4995 scope.go:117] "RemoveContainer" containerID="6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.190504 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.190489268 podStartE2EDuration="6.190489268s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:00.188808383 +0000 UTC m=+1118.433413199" watchObservedRunningTime="2026-01-20 16:50:00.190489268 +0000 UTC m=+1118.435094074" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.201669 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.209453 4995 scope.go:117] "RemoveContainer" containerID="8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.216282 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-config-data" (OuterVolumeSpecName: "config-data") pod "6bff689a-b9d5-4a27-a93b-6c4105a6f88c" (UID: "6bff689a-b9d5-4a27-a93b-6c4105a6f88c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230530 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230574 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230585 4995 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230593 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230625 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230636 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qjwh\" (UniqueName: \"kubernetes.io/projected/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-kube-api-access-9qjwh\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230644 4995 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.230652 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6bff689a-b9d5-4a27-a93b-6c4105a6f88c-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.231723 4995 scope.go:117] "RemoveContainer" containerID="6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf" Jan 20 16:50:00 crc kubenswrapper[4995]: E0120 16:50:00.233495 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf\": container with ID starting with 6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf not found: ID does not exist" containerID="6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.233534 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf"} err="failed to get container status \"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf\": rpc error: code = NotFound desc = could not find container \"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf\": container with ID starting with 6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf not found: ID does not exist" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.233558 4995 scope.go:117] "RemoveContainer" 
containerID="8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38" Jan 20 16:50:00 crc kubenswrapper[4995]: E0120 16:50:00.233804 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38\": container with ID starting with 8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38 not found: ID does not exist" containerID="8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.233823 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38"} err="failed to get container status \"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38\": rpc error: code = NotFound desc = could not find container \"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38\": container with ID starting with 8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38 not found: ID does not exist" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.233837 4995 scope.go:117] "RemoveContainer" containerID="6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.234336 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf"} err="failed to get container status \"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf\": rpc error: code = NotFound desc = could not find container \"6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf\": container with ID starting with 6794cf0a68ed74f12c4db21c8e99e36ce1f6a7a5d6394549f795cf6e58e1cbbf not found: ID does not exist" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.234354 4995 scope.go:117] "RemoveContainer" containerID="8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.234876 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38"} err="failed to get container status \"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38\": rpc error: code = NotFound desc = could not find container \"8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38\": container with ID starting with 8151f392e11a31b0c92b5832c2cb4a8616c810ea9e503ef4d5ad5a1461322b38 not found: ID does not exist" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.253252 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.336389 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.508987 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.519344 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543172 4995 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:00 crc kubenswrapper[4995]: E0120 16:50:00.543587 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-log" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543604 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-log" Jan 20 16:50:00 crc kubenswrapper[4995]: E0120 16:50:00.543618 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60daeddb-7af3-4e92-89e6-5a302af596a0" containerName="init" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543624 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="60daeddb-7af3-4e92-89e6-5a302af596a0" containerName="init" Jan 20 16:50:00 crc kubenswrapper[4995]: E0120 16:50:00.543636 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-httpd" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543644 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-httpd" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543846 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-httpd" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543865 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" containerName="glance-log" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.543889 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="60daeddb-7af3-4e92-89e6-5a302af596a0" containerName="init" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.544830 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.549950 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.550017 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.552769 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.589884 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.590219 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.647909 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-config-data\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.647989 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.648032 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.648047 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-scripts\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.648094 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.648130 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6qgw\" (UniqueName: 
\"kubernetes.io/projected/2e22732f-c280-4803-b3d6-8f5a4d0ab632-kube-api-access-z6qgw\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.648164 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-logs\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.648191 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750199 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750276 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750302 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-scripts\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750357 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750385 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6qgw\" (UniqueName: \"kubernetes.io/projected/2e22732f-c280-4803-b3d6-8f5a4d0ab632-kube-api-access-z6qgw\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750431 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-logs\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750464 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.750505 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-config-data\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.751522 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.751623 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.752061 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-logs\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.756982 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-config-data\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.759014 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.760811 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-scripts\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.761963 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.769247 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6qgw\" (UniqueName: \"kubernetes.io/projected/2e22732f-c280-4803-b3d6-8f5a4d0ab632-kube-api-access-z6qgw\") pod \"glance-default-external-api-0\" 
(UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.798253 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:00 crc kubenswrapper[4995]: I0120 16:50:00.896933 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.007830 4995 generic.go:334] "Generic (PLEG): container finished" podID="12094649-404d-4452-a514-256d83ae33e1" containerID="eec45b2cea5651bc2a81854648aa2f0a8fd3c0b130b5982c1fee687be4f46b9f" exitCode=0 Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.021917 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bff689a-b9d5-4a27-a93b-6c4105a6f88c" path="/var/lib/kubelet/pods/6bff689a-b9d5-4a27-a93b-6c4105a6f88c/volumes" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.022704 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-nsnv8" event={"ID":"12094649-404d-4452-a514-256d83ae33e1","Type":"ContainerDied","Data":"eec45b2cea5651bc2a81854648aa2f0a8fd3c0b130b5982c1fee687be4f46b9f"} Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.070401 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.070941 4995 generic.go:334] "Generic (PLEG): container finished" podID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerID="5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c" exitCode=0 Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.071019 4995 generic.go:334] "Generic (PLEG): container finished" podID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerID="026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e" exitCode=143 Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.071821 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e905a434-9ee1-4a03-a13d-22cb5b0bd609","Type":"ContainerDied","Data":"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c"} Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.071954 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e905a434-9ee1-4a03-a13d-22cb5b0bd609","Type":"ContainerDied","Data":"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e"} Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.072015 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e905a434-9ee1-4a03-a13d-22cb5b0bd609","Type":"ContainerDied","Data":"cff03420b6578f974657c20dd93f1974691b1bfd329729cde37685082bc49356"} Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.072089 4995 scope.go:117] "RemoveContainer" containerID="5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.082172 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-internal-tls-certs\") pod 
\"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.185025 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4dgv\" (UniqueName: \"kubernetes.io/projected/e905a434-9ee1-4a03-a13d-22cb5b0bd609-kube-api-access-p4dgv\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.186356 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-httpd-run\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.186410 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-scripts\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.186790 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-logs\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.188224 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.188364 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-config-data\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.188878 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-combined-ca-bundle\") pod \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\" (UID: \"e905a434-9ee1-4a03-a13d-22cb5b0bd609\") " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.186931 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.189253 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-logs" (OuterVolumeSpecName: "logs") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.193881 4995 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.193919 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e905a434-9ee1-4a03-a13d-22cb5b0bd609-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.204605 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e905a434-9ee1-4a03-a13d-22cb5b0bd609-kube-api-access-p4dgv" (OuterVolumeSpecName: "kube-api-access-p4dgv") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "kube-api-access-p4dgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.206232 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-scripts" (OuterVolumeSpecName: "scripts") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.229488 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.232244 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.272360 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-config-data" (OuterVolumeSpecName: "config-data") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.296850 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e905a434-9ee1-4a03-a13d-22cb5b0bd609" (UID: "e905a434-9ee1-4a03-a13d-22cb5b0bd609"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.298033 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4dgv\" (UniqueName: \"kubernetes.io/projected/e905a434-9ee1-4a03-a13d-22cb5b0bd609-kube-api-access-p4dgv\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.298058 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.298070 4995 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.298515 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.298534 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.298545 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e905a434-9ee1-4a03-a13d-22cb5b0bd609-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.370930 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.399837 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.703131 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.738599 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6db4b5b7df-cv7h4"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.764221 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-84df7dbffb-njbnq"] Jan 20 16:50:02 crc kubenswrapper[4995]: E0120 16:50:02.764873 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-httpd" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.764930 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-httpd" Jan 20 16:50:02 crc kubenswrapper[4995]: E0120 16:50:02.764967 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-log" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.765001 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-log" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.765265 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" 
containerName="glance-log" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.765298 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" containerName="glance-httpd" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.766740 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.774816 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.781853 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84df7dbffb-njbnq"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.792203 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.807832 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-588ff59d7c-f4fvk"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811212 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-config-data\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811282 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzxhd\" (UniqueName: \"kubernetes.io/projected/8e877da9-408f-40dd-8e4a-5173ba3d6988-kube-api-access-gzxhd\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811322 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e877da9-408f-40dd-8e4a-5173ba3d6988-logs\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811356 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-tls-certs\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811397 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-secret-key\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811421 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-scripts\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.811444 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-combined-ca-bundle\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.857263 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7cd588cc5b-pmhlg"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.860242 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.879922 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7cd588cc5b-pmhlg"] Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.912949 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-secret-key\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913001 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-scripts\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913031 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-logs\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913055 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbsj9\" (UniqueName: \"kubernetes.io/projected/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-kube-api-access-bbsj9\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913080 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-combined-ca-bundle\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913121 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-scripts\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913159 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-config-data\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 
16:50:02.913201 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzxhd\" (UniqueName: \"kubernetes.io/projected/8e877da9-408f-40dd-8e4a-5173ba3d6988-kube-api-access-gzxhd\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913222 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-config-data\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913251 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e877da9-408f-40dd-8e4a-5173ba3d6988-logs\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913271 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-horizon-tls-certs\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913297 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-combined-ca-bundle\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913318 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-horizon-secret-key\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.913334 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-tls-certs\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.914729 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e877da9-408f-40dd-8e4a-5173ba3d6988-logs\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.915987 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-scripts\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.916039 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-config-data\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.918126 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-tls-certs\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.921150 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-combined-ca-bundle\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.937195 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-secret-key\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:02 crc kubenswrapper[4995]: I0120 16:50:02.940850 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzxhd\" (UniqueName: \"kubernetes.io/projected/8e877da9-408f-40dd-8e4a-5173ba3d6988-kube-api-access-gzxhd\") pod \"horizon-84df7dbffb-njbnq\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015214 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-logs\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015254 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbsj9\" (UniqueName: \"kubernetes.io/projected/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-kube-api-access-bbsj9\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015289 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-scripts\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015406 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-config-data\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015457 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-horizon-tls-certs\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: 
\"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015482 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-combined-ca-bundle\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.015860 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-horizon-secret-key\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.019062 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-scripts\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.019527 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-logs\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.021815 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-horizon-secret-key\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.023321 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-config-data\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.027829 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-horizon-tls-certs\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.036905 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-combined-ca-bundle\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.039788 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbsj9\" (UniqueName: \"kubernetes.io/projected/83a7df1c-c59a-4a4c-b34d-df9fc6711aea-kube-api-access-bbsj9\") pod \"horizon-7cd588cc5b-pmhlg\" (UID: \"83a7df1c-c59a-4a4c-b34d-df9fc6711aea\") " pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.088675 4995 util.go:48] "No ready sandbox 
for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.113460 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.133405 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.165834 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.192592 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.201696 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.202111 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.204420 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.208080 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219697 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219760 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219808 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219831 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219851 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219873 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc6xr\" (UniqueName: \"kubernetes.io/projected/3edee2ed-6825-49d5-9556-a33a54331f20-kube-api-access-qc6xr\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219916 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-logs\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.219972 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.306773 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.320918 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.320968 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.320992 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321012 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc6xr\" (UniqueName: \"kubernetes.io/projected/3edee2ed-6825-49d5-9556-a33a54331f20-kube-api-access-qc6xr\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321056 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-logs\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321114 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321138 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321191 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321546 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.321740 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.323177 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-logs\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.343185 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.343824 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.343950 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.347390 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc6xr\" (UniqueName: \"kubernetes.io/projected/3edee2ed-6825-49d5-9556-a33a54331f20-kube-api-access-qc6xr\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " 
pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.352522 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.451719 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:50:03 crc kubenswrapper[4995]: I0120 16:50:03.528234 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:04 crc kubenswrapper[4995]: I0120 16:50:04.003909 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e905a434-9ee1-4a03-a13d-22cb5b0bd609" path="/var/lib/kubelet/pods/e905a434-9ee1-4a03-a13d-22cb5b0bd609/volumes" Jan 20 16:50:05 crc kubenswrapper[4995]: I0120 16:50:05.171267 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:50:05 crc kubenswrapper[4995]: I0120 16:50:05.227853 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2z2z5"] Jan 20 16:50:05 crc kubenswrapper[4995]: I0120 16:50:05.228117 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-2z2z5" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="dnsmasq-dns" containerID="cri-o://3a72c3fa3e75610ecc6e0f011ed410818051290cd5579ebfd79ef48c6e033505" gracePeriod=10 Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.131506 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.133013 4995 generic.go:334] "Generic (PLEG): container finished" podID="c030f925-c98d-4500-bcff-340a978d5fbc" containerID="3a72c3fa3e75610ecc6e0f011ed410818051290cd5579ebfd79ef48c6e033505" exitCode=0 Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.133076 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2z2z5" event={"ID":"c030f925-c98d-4500-bcff-340a978d5fbc","Type":"ContainerDied","Data":"3a72c3fa3e75610ecc6e0f011ed410818051290cd5579ebfd79ef48c6e033505"} Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.134111 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2e22732f-c280-4803-b3d6-8f5a4d0ab632","Type":"ContainerStarted","Data":"3f2d0a67feac908f2bf6727767a7d4f3eb9152d996f81e088243c20f92493a60"} Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.135639 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-nsnv8" event={"ID":"12094649-404d-4452-a514-256d83ae33e1","Type":"ContainerDied","Data":"9abef3cd62c5b7c8500fcf40dbe51ff6e58cf48e3ba3c4825c74faed7a932222"} Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.135666 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9abef3cd62c5b7c8500fcf40dbe51ff6e58cf48e3ba3c4825c74faed7a932222" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.135714 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-nsnv8" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.276154 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-scripts\") pod \"12094649-404d-4452-a514-256d83ae33e1\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.277430 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-combined-ca-bundle\") pod \"12094649-404d-4452-a514-256d83ae33e1\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.277494 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-config-data\") pod \"12094649-404d-4452-a514-256d83ae33e1\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.277517 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-credential-keys\") pod \"12094649-404d-4452-a514-256d83ae33e1\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.277594 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn92v\" (UniqueName: \"kubernetes.io/projected/12094649-404d-4452-a514-256d83ae33e1-kube-api-access-kn92v\") pod \"12094649-404d-4452-a514-256d83ae33e1\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.277686 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-fernet-keys\") pod \"12094649-404d-4452-a514-256d83ae33e1\" (UID: \"12094649-404d-4452-a514-256d83ae33e1\") " Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.285188 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "12094649-404d-4452-a514-256d83ae33e1" (UID: "12094649-404d-4452-a514-256d83ae33e1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.288256 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-scripts" (OuterVolumeSpecName: "scripts") pod "12094649-404d-4452-a514-256d83ae33e1" (UID: "12094649-404d-4452-a514-256d83ae33e1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.288779 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "12094649-404d-4452-a514-256d83ae33e1" (UID: "12094649-404d-4452-a514-256d83ae33e1"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.305298 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12094649-404d-4452-a514-256d83ae33e1-kube-api-access-kn92v" (OuterVolumeSpecName: "kube-api-access-kn92v") pod "12094649-404d-4452-a514-256d83ae33e1" (UID: "12094649-404d-4452-a514-256d83ae33e1"). InnerVolumeSpecName "kube-api-access-kn92v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.314387 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12094649-404d-4452-a514-256d83ae33e1" (UID: "12094649-404d-4452-a514-256d83ae33e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.321230 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-config-data" (OuterVolumeSpecName: "config-data") pod "12094649-404d-4452-a514-256d83ae33e1" (UID: "12094649-404d-4452-a514-256d83ae33e1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.379521 4995 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.379558 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.379567 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.379578 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.379589 4995 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12094649-404d-4452-a514-256d83ae33e1-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:06 crc kubenswrapper[4995]: I0120 16:50:06.379597 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn92v\" (UniqueName: \"kubernetes.io/projected/12094649-404d-4452-a514-256d83ae33e1-kube-api-access-kn92v\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.290776 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-nsnv8"] Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.295043 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-nsnv8"] Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.399564 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-shkpd"] Jan 20 16:50:07 crc kubenswrapper[4995]: E0120 16:50:07.400021 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12094649-404d-4452-a514-256d83ae33e1" containerName="keystone-bootstrap" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.400048 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="12094649-404d-4452-a514-256d83ae33e1" containerName="keystone-bootstrap" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.400387 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="12094649-404d-4452-a514-256d83ae33e1" containerName="keystone-bootstrap" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.401176 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.404021 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-m8prt" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.404453 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.404713 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.405771 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.405917 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.411739 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-shkpd"] Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.500333 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-fernet-keys\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.500679 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8lj4\" (UniqueName: \"kubernetes.io/projected/eabde76f-904d-4313-8013-f17d65cc178f-kube-api-access-f8lj4\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.500720 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-config-data\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.500772 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-combined-ca-bundle\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.500791 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-scripts\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.500827 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-credential-keys\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.603162 4995 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-fernet-keys\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.603322 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8lj4\" (UniqueName: \"kubernetes.io/projected/eabde76f-904d-4313-8013-f17d65cc178f-kube-api-access-f8lj4\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.603375 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-config-data\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.603439 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-combined-ca-bundle\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.603460 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-scripts\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.603499 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-credential-keys\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.611442 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-config-data\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.614834 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-fernet-keys\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.615812 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-combined-ca-bundle\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.617220 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-credential-keys\") pod \"keystone-bootstrap-shkpd\" (UID: 
\"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.642206 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-scripts\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.644920 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8lj4\" (UniqueName: \"kubernetes.io/projected/eabde76f-904d-4313-8013-f17d65cc178f-kube-api-access-f8lj4\") pod \"keystone-bootstrap-shkpd\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:07 crc kubenswrapper[4995]: I0120 16:50:07.742371 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:08 crc kubenswrapper[4995]: I0120 16:50:08.001502 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12094649-404d-4452-a514-256d83ae33e1" path="/var/lib/kubelet/pods/12094649-404d-4452-a514-256d83ae33e1/volumes" Jan 20 16:50:13 crc kubenswrapper[4995]: I0120 16:50:13.840550 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2z2z5" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.120:5353: i/o timeout" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.675838 4995 scope.go:117] "RemoveContainer" containerID="026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e" Jan 20 16:50:14 crc kubenswrapper[4995]: E0120 16:50:14.678476 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 20 16:50:14 crc kubenswrapper[4995]: E0120 16:50:14.678655 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n559h59bh646h697h56ch599h5ffh69h56ch75h5b6h574h64ch5bch94h696h547h65ch57fh57ch685h65chc8hc7hdhf5hd8hdh556h655hbch64cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cp7lj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-7dc8cbd4f9-qkmt7_openstack(a79e57d7-d6cb-43fa-b790-822e3e532591): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:50:14 crc kubenswrapper[4995]: E0120 16:50:14.680353 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-7dc8cbd4f9-qkmt7" podUID="a79e57d7-d6cb-43fa-b790-822e3e532591" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.749065 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.842396 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tszf\" (UniqueName: \"kubernetes.io/projected/c030f925-c98d-4500-bcff-340a978d5fbc-kube-api-access-7tszf\") pod \"c030f925-c98d-4500-bcff-340a978d5fbc\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.842469 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-config\") pod \"c030f925-c98d-4500-bcff-340a978d5fbc\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.843219 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-sb\") pod \"c030f925-c98d-4500-bcff-340a978d5fbc\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.843451 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-dns-svc\") pod \"c030f925-c98d-4500-bcff-340a978d5fbc\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.843581 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-nb\") pod \"c030f925-c98d-4500-bcff-340a978d5fbc\" (UID: \"c030f925-c98d-4500-bcff-340a978d5fbc\") " Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.849663 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c030f925-c98d-4500-bcff-340a978d5fbc-kube-api-access-7tszf" (OuterVolumeSpecName: "kube-api-access-7tszf") pod "c030f925-c98d-4500-bcff-340a978d5fbc" (UID: "c030f925-c98d-4500-bcff-340a978d5fbc"). InnerVolumeSpecName "kube-api-access-7tszf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.886555 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c030f925-c98d-4500-bcff-340a978d5fbc" (UID: "c030f925-c98d-4500-bcff-340a978d5fbc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.892608 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-config" (OuterVolumeSpecName: "config") pod "c030f925-c98d-4500-bcff-340a978d5fbc" (UID: "c030f925-c98d-4500-bcff-340a978d5fbc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.892724 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c030f925-c98d-4500-bcff-340a978d5fbc" (UID: "c030f925-c98d-4500-bcff-340a978d5fbc"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.901566 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c030f925-c98d-4500-bcff-340a978d5fbc" (UID: "c030f925-c98d-4500-bcff-340a978d5fbc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.946494 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.946523 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tszf\" (UniqueName: \"kubernetes.io/projected/c030f925-c98d-4500-bcff-340a978d5fbc-kube-api-access-7tszf\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.946536 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.946544 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:14 crc kubenswrapper[4995]: I0120 16:50:14.946552 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c030f925-c98d-4500-bcff-340a978d5fbc-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:15 crc kubenswrapper[4995]: I0120 16:50:15.231957 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2z2z5" event={"ID":"c030f925-c98d-4500-bcff-340a978d5fbc","Type":"ContainerDied","Data":"c3664fd129bc9a39d8b7b4c54a9273ebe10a37e40a5d7818c41784d341b61a18"} Jan 20 16:50:15 crc kubenswrapper[4995]: I0120 16:50:15.231987 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2z2z5" Jan 20 16:50:15 crc kubenswrapper[4995]: I0120 16:50:15.284188 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2z2z5"] Jan 20 16:50:15 crc kubenswrapper[4995]: I0120 16:50:15.291772 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2z2z5"] Jan 20 16:50:16 crc kubenswrapper[4995]: I0120 16:50:16.009393 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" path="/var/lib/kubelet/pods/c030f925-c98d-4500-bcff-340a978d5fbc/volumes" Jan 20 16:50:18 crc kubenswrapper[4995]: I0120 16:50:18.841632 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2z2z5" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.120:5353: i/o timeout" Jan 20 16:50:19 crc kubenswrapper[4995]: I0120 16:50:19.264850 4995 generic.go:334] "Generic (PLEG): container finished" podID="c8a61b44-4464-497c-881e-bdc0d9063bd9" containerID="d2d4f341556f481ad7e5304853ff66e49b89bc2b9ed38f644d8d2401c9ed629c" exitCode=0 Jan 20 16:50:19 crc kubenswrapper[4995]: I0120 16:50:19.264892 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mzctz" event={"ID":"c8a61b44-4464-497c-881e-bdc0d9063bd9","Type":"ContainerDied","Data":"d2d4f341556f481ad7e5304853ff66e49b89bc2b9ed38f644d8d2401c9ed629c"} Jan 20 16:50:23 crc kubenswrapper[4995]: E0120 16:50:23.965678 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Jan 20 16:50:23 crc kubenswrapper[4995]: E0120 16:50:23.967174 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n59ch59dhd8h544h56dh665h575h67fh99h85h55dh64dh665h659h56h88h55bh698h66ch7fhb6h99hf6h584h5cch67dh574h696h9bh55h98h5ddq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6jr2b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(1e646811-19e9-4a68-a419-6d0db9feb93e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:50:24 crc kubenswrapper[4995]: E0120 16:50:24.494103 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Jan 20 16:50:24 crc kubenswrapper[4995]: E0120 16:50:24.496692 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pjdrq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-cc4hb_openstack(6cb81edf-880e-421a-bc37-258db15b1ad9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:50:24 crc kubenswrapper[4995]: E0120 16:50:24.498018 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-cc4hb" 
podUID="6cb81edf-880e-421a-bc37-258db15b1ad9" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.596948 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-mzctz" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.601236 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.629728 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-config-data\") pod \"a79e57d7-d6cb-43fa-b790-822e3e532591\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.629786 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-scripts\") pod \"a79e57d7-d6cb-43fa-b790-822e3e532591\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.629831 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cp7lj\" (UniqueName: \"kubernetes.io/projected/a79e57d7-d6cb-43fa-b790-822e3e532591-kube-api-access-cp7lj\") pod \"a79e57d7-d6cb-43fa-b790-822e3e532591\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.629937 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-combined-ca-bundle\") pod \"c8a61b44-4464-497c-881e-bdc0d9063bd9\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.629997 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hq82f\" (UniqueName: \"kubernetes.io/projected/c8a61b44-4464-497c-881e-bdc0d9063bd9-kube-api-access-hq82f\") pod \"c8a61b44-4464-497c-881e-bdc0d9063bd9\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.630024 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-config\") pod \"c8a61b44-4464-497c-881e-bdc0d9063bd9\" (UID: \"c8a61b44-4464-497c-881e-bdc0d9063bd9\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.630121 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a79e57d7-d6cb-43fa-b790-822e3e532591-logs\") pod \"a79e57d7-d6cb-43fa-b790-822e3e532591\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.630163 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a79e57d7-d6cb-43fa-b790-822e3e532591-horizon-secret-key\") pod \"a79e57d7-d6cb-43fa-b790-822e3e532591\" (UID: \"a79e57d7-d6cb-43fa-b790-822e3e532591\") " Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.634812 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a79e57d7-d6cb-43fa-b790-822e3e532591-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a79e57d7-d6cb-43fa-b790-822e3e532591" (UID: 
"a79e57d7-d6cb-43fa-b790-822e3e532591"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.637111 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8a61b44-4464-497c-881e-bdc0d9063bd9-kube-api-access-hq82f" (OuterVolumeSpecName: "kube-api-access-hq82f") pod "c8a61b44-4464-497c-881e-bdc0d9063bd9" (UID: "c8a61b44-4464-497c-881e-bdc0d9063bd9"). InnerVolumeSpecName "kube-api-access-hq82f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.638433 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a79e57d7-d6cb-43fa-b790-822e3e532591-logs" (OuterVolumeSpecName: "logs") pod "a79e57d7-d6cb-43fa-b790-822e3e532591" (UID: "a79e57d7-d6cb-43fa-b790-822e3e532591"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.638808 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-scripts" (OuterVolumeSpecName: "scripts") pod "a79e57d7-d6cb-43fa-b790-822e3e532591" (UID: "a79e57d7-d6cb-43fa-b790-822e3e532591"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.641259 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-config-data" (OuterVolumeSpecName: "config-data") pod "a79e57d7-d6cb-43fa-b790-822e3e532591" (UID: "a79e57d7-d6cb-43fa-b790-822e3e532591"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.643290 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a79e57d7-d6cb-43fa-b790-822e3e532591-kube-api-access-cp7lj" (OuterVolumeSpecName: "kube-api-access-cp7lj") pod "a79e57d7-d6cb-43fa-b790-822e3e532591" (UID: "a79e57d7-d6cb-43fa-b790-822e3e532591"). InnerVolumeSpecName "kube-api-access-cp7lj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.668636 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-config" (OuterVolumeSpecName: "config") pod "c8a61b44-4464-497c-881e-bdc0d9063bd9" (UID: "c8a61b44-4464-497c-881e-bdc0d9063bd9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.700450 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8a61b44-4464-497c-881e-bdc0d9063bd9" (UID: "c8a61b44-4464-497c-881e-bdc0d9063bd9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732218 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732261 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hq82f\" (UniqueName: \"kubernetes.io/projected/c8a61b44-4464-497c-881e-bdc0d9063bd9-kube-api-access-hq82f\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732275 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c8a61b44-4464-497c-881e-bdc0d9063bd9-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732287 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a79e57d7-d6cb-43fa-b790-822e3e532591-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732299 4995 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a79e57d7-d6cb-43fa-b790-822e3e532591-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732309 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732319 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a79e57d7-d6cb-43fa-b790-822e3e532591-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:24 crc kubenswrapper[4995]: I0120 16:50:24.732329 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cp7lj\" (UniqueName: \"kubernetes.io/projected/a79e57d7-d6cb-43fa-b790-822e3e532591-kube-api-access-cp7lj\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.316654 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-mzctz" event={"ID":"c8a61b44-4464-497c-881e-bdc0d9063bd9","Type":"ContainerDied","Data":"f39979b8665f447ade03625d209c3acbc4ea8cb034ef7b8abe0bd8bd3050d2d9"} Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.316690 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f39979b8665f447ade03625d209c3acbc4ea8cb034ef7b8abe0bd8bd3050d2d9" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.316737 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-mzctz" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.321450 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7dc8cbd4f9-qkmt7" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.324353 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7dc8cbd4f9-qkmt7" event={"ID":"a79e57d7-d6cb-43fa-b790-822e3e532591","Type":"ContainerDied","Data":"7efb64a188a9d22e7b98ef5e63ce0f4a77f49c4017b007ec5fdcf440b5c58941"} Jan 20 16:50:25 crc kubenswrapper[4995]: E0120 16:50:25.325967 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-cc4hb" podUID="6cb81edf-880e-421a-bc37-258db15b1ad9" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.461166 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7dc8cbd4f9-qkmt7"] Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.481868 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7dc8cbd4f9-qkmt7"] Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.889276 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-9jjtp"] Jan 20 16:50:25 crc kubenswrapper[4995]: E0120 16:50:25.889978 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="init" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.889995 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="init" Jan 20 16:50:25 crc kubenswrapper[4995]: E0120 16:50:25.890013 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8a61b44-4464-497c-881e-bdc0d9063bd9" containerName="neutron-db-sync" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.890020 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8a61b44-4464-497c-881e-bdc0d9063bd9" containerName="neutron-db-sync" Jan 20 16:50:25 crc kubenswrapper[4995]: E0120 16:50:25.890037 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="dnsmasq-dns" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.890045 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="dnsmasq-dns" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.890232 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c030f925-c98d-4500-bcff-340a978d5fbc" containerName="dnsmasq-dns" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.890265 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8a61b44-4464-497c-881e-bdc0d9063bd9" containerName="neutron-db-sync" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.891295 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.910850 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8dfc578d4-g6p6m"] Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.912372 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.916624 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.916818 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-26dnj" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.916922 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.917029 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.929905 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-9jjtp"] Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.944295 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8dfc578d4-g6p6m"] Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.961932 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.961988 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-combined-ca-bundle\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962019 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-ovndb-tls-certs\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962176 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-config\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962409 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962463 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962592 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-config\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962659 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-httpd-config\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962682 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjp2b\" (UniqueName: \"kubernetes.io/projected/ff583ed2-5f45-45fe-aa25-f02872f482b1-kube-api-access-rjp2b\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962718 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j87d7\" (UniqueName: \"kubernetes.io/projected/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-kube-api-access-j87d7\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:25 crc kubenswrapper[4995]: I0120 16:50:25.962735 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-svc\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.008186 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a79e57d7-d6cb-43fa-b790-822e3e532591" path="/var/lib/kubelet/pods/a79e57d7-d6cb-43fa-b790-822e3e532591/volumes" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064299 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064356 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-combined-ca-bundle\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064386 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-ovndb-tls-certs\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064414 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-config\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064550 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064572 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064637 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-config\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064679 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-httpd-config\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064702 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjp2b\" (UniqueName: \"kubernetes.io/projected/ff583ed2-5f45-45fe-aa25-f02872f482b1-kube-api-access-rjp2b\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064727 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j87d7\" (UniqueName: \"kubernetes.io/projected/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-kube-api-access-j87d7\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.064742 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-svc\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.065782 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-config\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.065937 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-swift-storage-0\") pod 
\"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.066177 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.066374 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-svc\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.066902 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.070414 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-combined-ca-bundle\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.071106 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-ovndb-tls-certs\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.071419 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-config\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.085173 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-httpd-config\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.086645 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjp2b\" (UniqueName: \"kubernetes.io/projected/ff583ed2-5f45-45fe-aa25-f02872f482b1-kube-api-access-rjp2b\") pod \"dnsmasq-dns-55f844cf75-9jjtp\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.087897 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j87d7\" (UniqueName: \"kubernetes.io/projected/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-kube-api-access-j87d7\") pod \"neutron-8dfc578d4-g6p6m\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 
16:50:26.216805 4995 scope.go:117] "RemoveContainer" containerID="5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.217186 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:26 crc kubenswrapper[4995]: E0120 16:50:26.218328 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c\": container with ID starting with 5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c not found: ID does not exist" containerID="5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.218388 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c"} err="failed to get container status \"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c\": rpc error: code = NotFound desc = could not find container \"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c\": container with ID starting with 5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c not found: ID does not exist" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.218423 4995 scope.go:117] "RemoveContainer" containerID="026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e" Jan 20 16:50:26 crc kubenswrapper[4995]: E0120 16:50:26.218797 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e\": container with ID starting with 026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e not found: ID does not exist" containerID="026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.218832 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e"} err="failed to get container status \"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e\": rpc error: code = NotFound desc = could not find container \"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e\": container with ID starting with 026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e not found: ID does not exist" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.218857 4995 scope.go:117] "RemoveContainer" containerID="5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.219207 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c"} err="failed to get container status \"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c\": rpc error: code = NotFound desc = could not find container \"5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c\": container with ID starting with 5d5c0961fb5a0ca66548f6ae6da64f741db946dd03e1d9b51f2f2665b477cf0c not found: ID does not exist" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.219243 4995 scope.go:117] "RemoveContainer" 
containerID="026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.219496 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e"} err="failed to get container status \"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e\": rpc error: code = NotFound desc = could not find container \"026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e\": container with ID starting with 026e3e25d104ff1bf461e38971ce023832aa74a9340ec4e1b2f7c2773f703f9e not found: ID does not exist" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.219528 4995 scope.go:117] "RemoveContainer" containerID="3a72c3fa3e75610ecc6e0f011ed410818051290cd5579ebfd79ef48c6e033505" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.239024 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:26 crc kubenswrapper[4995]: E0120 16:50:26.260295 4995 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 20 16:50:26 crc kubenswrapper[4995]: E0120 16:50:26.260447 4995 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-558j4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:n
il,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-hmlm7_openstack(53794c82-829c-4b77-b902-01be2130f0b8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 16:50:26 crc kubenswrapper[4995]: E0120 16:50:26.262339 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-hmlm7" podUID="53794c82-829c-4b77-b902-01be2130f0b8" Jan 20 16:50:26 crc kubenswrapper[4995]: E0120 16:50:26.352507 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-hmlm7" podUID="53794c82-829c-4b77-b902-01be2130f0b8" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.488668 4995 scope.go:117] "RemoveContainer" containerID="866a9da7e7f8b897690d43357405ee102251cf38d557814b2a885fed1f550de7" Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.852744 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84df7dbffb-njbnq"] Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.970678 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:50:26 crc kubenswrapper[4995]: I0120 16:50:26.984449 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7cd588cc5b-pmhlg"] Jan 20 16:50:27 crc kubenswrapper[4995]: W0120 16:50:27.021647 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3edee2ed_6825_49d5_9556_a33a54331f20.slice/crio-efe2942923dec6a397ae7d80bdd1da1ad0fd91ccb274be31c4b2d0c2ffbfd7f2 WatchSource:0}: Error finding container efe2942923dec6a397ae7d80bdd1da1ad0fd91ccb274be31c4b2d0c2ffbfd7f2: Status 404 returned error can't find the container with id efe2942923dec6a397ae7d80bdd1da1ad0fd91ccb274be31c4b2d0c2ffbfd7f2 Jan 20 16:50:27 crc kubenswrapper[4995]: W0120 16:50:27.051307 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83a7df1c_c59a_4a4c_b34d_df9fc6711aea.slice/crio-f2c1aadec98384b9cc680919c8071f5783c3ad4385f55f1f1840b79ad2605fbb WatchSource:0}: Error finding container f2c1aadec98384b9cc680919c8071f5783c3ad4385f55f1f1840b79ad2605fbb: Status 404 returned error can't find the container with id f2c1aadec98384b9cc680919c8071f5783c3ad4385f55f1f1840b79ad2605fbb Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.072149 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-shkpd"] Jan 20 16:50:27 crc kubenswrapper[4995]: W0120 16:50:27.082724 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeabde76f_904d_4313_8013_f17d65cc178f.slice/crio-6690d34bf09b8943ba39972a77aa1e2a22aa355beac922a1204b8e76d5dae046 WatchSource:0}: Error finding container 6690d34bf09b8943ba39972a77aa1e2a22aa355beac922a1204b8e76d5dae046: Status 404 returned error can't find the container 
with id 6690d34bf09b8943ba39972a77aa1e2a22aa355beac922a1204b8e76d5dae046 Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.088148 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-9jjtp"] Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.088616 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 20 16:50:27 crc kubenswrapper[4995]: W0120 16:50:27.125056 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff583ed2_5f45_45fe_aa25_f02872f482b1.slice/crio-ceda8149265e576b7154304fec6557dd148be6b67d46f2a288bb06e453d6900c WatchSource:0}: Error finding container ceda8149265e576b7154304fec6557dd148be6b67d46f2a288bb06e453d6900c: Status 404 returned error can't find the container with id ceda8149265e576b7154304fec6557dd148be6b67d46f2a288bb06e453d6900c Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.322899 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8dfc578d4-g6p6m"] Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.366831 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6db4b5b7df-cv7h4" event={"ID":"701cf418-d6f5-4326-b237-2fd120de4bd3","Type":"ContainerStarted","Data":"0d07b2ef5695d0b7aade74ffebd5e08dfe97b23fafbe31f5e44ce4e19adc4fa6"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.368043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3edee2ed-6825-49d5-9556-a33a54331f20","Type":"ContainerStarted","Data":"efe2942923dec6a397ae7d80bdd1da1ad0fd91ccb274be31c4b2d0c2ffbfd7f2"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.370209 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-shkpd" event={"ID":"eabde76f-904d-4313-8013-f17d65cc178f","Type":"ContainerStarted","Data":"6690d34bf09b8943ba39972a77aa1e2a22aa355beac922a1204b8e76d5dae046"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.371928 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" event={"ID":"ff583ed2-5f45-45fe-aa25-f02872f482b1","Type":"ContainerStarted","Data":"ceda8149265e576b7154304fec6557dd148be6b67d46f2a288bb06e453d6900c"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.379735 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-588ff59d7c-f4fvk" event={"ID":"d3afe443-c0e4-49f2-9245-29db8eeefba5","Type":"ContainerStarted","Data":"5f7d1b1b1020d3572a3af35cb8858236820c5f35f7db8307b2befccacebdde4f"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.381744 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7cd588cc5b-pmhlg" event={"ID":"83a7df1c-c59a-4a4c-b34d-df9fc6711aea","Type":"ContainerStarted","Data":"f2c1aadec98384b9cc680919c8071f5783c3ad4385f55f1f1840b79ad2605fbb"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.387448 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-ncfj6" event={"ID":"e5603781-3cf3-41db-bfc7-7dc74d244fd4","Type":"ContainerStarted","Data":"29bf869aa9c0b173a588767f8be2c70c46b4d38c9d6f32d66064158343dde75b"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.397107 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84df7dbffb-njbnq" 
event={"ID":"8e877da9-408f-40dd-8e4a-5173ba3d6988","Type":"ContainerStarted","Data":"f5762486221393b34270e1f452370aae2241bb2d3f32181c7c3ff8712e8cb4ea"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.398977 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4zjlv" event={"ID":"40362747-51a2-473e-845c-3427003b9b7a","Type":"ContainerStarted","Data":"cc4db8b534ebf1fe6b4679e9246066f633e666f3cf0eb0bc1048bccf4a0334d1"} Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.416826 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-ncfj6" podStartSLOduration=3.035766096 podStartE2EDuration="1m4.416807347s" podCreationTimestamp="2026-01-20 16:49:23 +0000 UTC" firstStartedPulling="2026-01-20 16:49:24.936603162 +0000 UTC m=+1083.181207968" lastFinishedPulling="2026-01-20 16:50:26.317644413 +0000 UTC m=+1144.562249219" observedRunningTime="2026-01-20 16:50:27.408676647 +0000 UTC m=+1145.653281463" watchObservedRunningTime="2026-01-20 16:50:27.416807347 +0000 UTC m=+1145.661412153" Jan 20 16:50:27 crc kubenswrapper[4995]: I0120 16:50:27.460319 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-4zjlv" podStartSLOduration=5.314461621 podStartE2EDuration="33.460300337s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="2026-01-20 16:49:56.349281381 +0000 UTC m=+1114.593886187" lastFinishedPulling="2026-01-20 16:50:24.495120097 +0000 UTC m=+1142.739724903" observedRunningTime="2026-01-20 16:50:27.432354149 +0000 UTC m=+1145.676958965" watchObservedRunningTime="2026-01-20 16:50:27.460300337 +0000 UTC m=+1145.704905143" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.251273 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7857b9874f-85h9n"] Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.253435 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.256433 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.256764 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.264472 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7857b9874f-85h9n"] Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360197 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-ovndb-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360284 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-public-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360322 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-config\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360375 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl8wj\" (UniqueName: \"kubernetes.io/projected/121cce9d-e190-44bf-b332-7b268c2ffd26-kube-api-access-tl8wj\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360400 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-combined-ca-bundle\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360416 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-internal-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.360439 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-httpd-config\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.416751 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84df7dbffb-njbnq" 
event={"ID":"8e877da9-408f-40dd-8e4a-5173ba3d6988","Type":"ContainerStarted","Data":"d5e83fb3d6635ff7c522d7f7fcf25ac6e8c713eb35fed7645bab1f6b93ff4b23"} Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.419581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8dfc578d4-g6p6m" event={"ID":"8a05857e-09be-4a5d-8e6d-00ffcb0b2400","Type":"ContainerStarted","Data":"d98b30eb6a6bd1d782d580b8551a61aac704eedf14bad87f9d819feb5c166c48"} Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.421027 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2e22732f-c280-4803-b3d6-8f5a4d0ab632","Type":"ContainerStarted","Data":"12368189ecbbd335434012dc58eb67c1b202101506bd5323a04b5a3588742fa0"} Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.438037 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6db4b5b7df-cv7h4" event={"ID":"701cf418-d6f5-4326-b237-2fd120de4bd3","Type":"ContainerStarted","Data":"535ee8d3926b615b662db7b8b0bfb8429276fe61f3678c3c6a17376ba2509b31"} Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.438197 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6db4b5b7df-cv7h4" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon-log" containerID="cri-o://0d07b2ef5695d0b7aade74ffebd5e08dfe97b23fafbe31f5e44ce4e19adc4fa6" gracePeriod=30 Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.438526 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6db4b5b7df-cv7h4" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon" containerID="cri-o://535ee8d3926b615b662db7b8b0bfb8429276fe61f3678c3c6a17376ba2509b31" gracePeriod=30 Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.471996 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-combined-ca-bundle\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.472045 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-internal-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.472073 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-httpd-config\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.472158 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-ovndb-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.472191 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-public-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.472221 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-config\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.472270 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl8wj\" (UniqueName: \"kubernetes.io/projected/121cce9d-e190-44bf-b332-7b268c2ffd26-kube-api-access-tl8wj\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.474852 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6db4b5b7df-cv7h4" podStartSLOduration=4.629025677 podStartE2EDuration="34.474836318s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="2026-01-20 16:49:56.42747865 +0000 UTC m=+1114.672083456" lastFinishedPulling="2026-01-20 16:50:26.273289291 +0000 UTC m=+1144.517894097" observedRunningTime="2026-01-20 16:50:28.471807936 +0000 UTC m=+1146.716412742" watchObservedRunningTime="2026-01-20 16:50:28.474836318 +0000 UTC m=+1146.719441124" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.477785 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-ovndb-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.480989 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-httpd-config\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.482966 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-public-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.483814 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-internal-tls-certs\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.484583 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-combined-ca-bundle\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.494935 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/121cce9d-e190-44bf-b332-7b268c2ffd26-config\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.501314 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl8wj\" (UniqueName: \"kubernetes.io/projected/121cce9d-e190-44bf-b332-7b268c2ffd26-kube-api-access-tl8wj\") pod \"neutron-7857b9874f-85h9n\" (UID: \"121cce9d-e190-44bf-b332-7b268c2ffd26\") " pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:28 crc kubenswrapper[4995]: I0120 16:50:28.596821 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:29 crc kubenswrapper[4995]: E0120 16:50:29.320227 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff583ed2_5f45_45fe_aa25_f02872f482b1.slice/crio-c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.343479 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7857b9874f-85h9n"] Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.473538 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2e22732f-c280-4803-b3d6-8f5a4d0ab632","Type":"ContainerStarted","Data":"bd2564138b3f646624f7076acd763180cf56421e8f09111dc6b09d760a394028"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.474824 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-log" containerID="cri-o://12368189ecbbd335434012dc58eb67c1b202101506bd5323a04b5a3588742fa0" gracePeriod=30 Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.475119 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-httpd" containerID="cri-o://bd2564138b3f646624f7076acd763180cf56421e8f09111dc6b09d760a394028" gracePeriod=30 Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.501573 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=29.501555849 podStartE2EDuration="29.501555849s" podCreationTimestamp="2026-01-20 16:50:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:29.499816573 +0000 UTC m=+1147.744421379" watchObservedRunningTime="2026-01-20 16:50:29.501555849 +0000 UTC m=+1147.746160655" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.524705 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-588ff59d7c-f4fvk" event={"ID":"d3afe443-c0e4-49f2-9245-29db8eeefba5","Type":"ContainerStarted","Data":"513c03e1212539567d9fe49c930fb99027cdf27baa3e69d25f1cf4364f17d310"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.525192 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-588ff59d7c-f4fvk" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" 
containerName="horizon-log" containerID="cri-o://5f7d1b1b1020d3572a3af35cb8858236820c5f35f7db8307b2befccacebdde4f" gracePeriod=30 Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.525991 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-588ff59d7c-f4fvk" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon" containerID="cri-o://513c03e1212539567d9fe49c930fb99027cdf27baa3e69d25f1cf4364f17d310" gracePeriod=30 Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.541461 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7cd588cc5b-pmhlg" event={"ID":"83a7df1c-c59a-4a4c-b34d-df9fc6711aea","Type":"ContainerStarted","Data":"1a793652c6162b81f1e8741df346f08e87cdc156dadf22deba4b8bdc35033ad3"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.541504 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7cd588cc5b-pmhlg" event={"ID":"83a7df1c-c59a-4a4c-b34d-df9fc6711aea","Type":"ContainerStarted","Data":"1b901093e308a72f361368cbe03c45b17635d89b371aec2983ce9f1a6ed9458d"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.550747 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3edee2ed-6825-49d5-9556-a33a54331f20","Type":"ContainerStarted","Data":"11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.569465 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerStarted","Data":"864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.582406 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-588ff59d7c-f4fvk" podStartSLOduration=7.011234363 podStartE2EDuration="33.58238786s" podCreationTimestamp="2026-01-20 16:49:56 +0000 UTC" firstStartedPulling="2026-01-20 16:49:57.927591669 +0000 UTC m=+1116.172196475" lastFinishedPulling="2026-01-20 16:50:24.498745126 +0000 UTC m=+1142.743349972" observedRunningTime="2026-01-20 16:50:29.560038804 +0000 UTC m=+1147.804643610" watchObservedRunningTime="2026-01-20 16:50:29.58238786 +0000 UTC m=+1147.826992666" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.607776 4995 generic.go:334] "Generic (PLEG): container finished" podID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerID="c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3" exitCode=0 Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.608049 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" event={"ID":"ff583ed2-5f45-45fe-aa25-f02872f482b1","Type":"ContainerDied","Data":"c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.617321 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84df7dbffb-njbnq" event={"ID":"8e877da9-408f-40dd-8e4a-5173ba3d6988","Type":"ContainerStarted","Data":"b8e948adfe77d2cd9ea089ac1055ca05cf957452ed332421df4521eb39eda287"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.631647 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8dfc578d4-g6p6m" event={"ID":"8a05857e-09be-4a5d-8e6d-00ffcb0b2400","Type":"ContainerStarted","Data":"2379974f509cd0ea89130ddc261469f046e2000dbfe03c476aaae116fca92ae2"} Jan 20 16:50:29 crc 
kubenswrapper[4995]: I0120 16:50:29.631692 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8dfc578d4-g6p6m" event={"ID":"8a05857e-09be-4a5d-8e6d-00ffcb0b2400","Type":"ContainerStarted","Data":"581d36a7796922e7f8417f9a42dd99de4ddd3acee35cb13a89958184095bd8e0"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.632058 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.635326 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7857b9874f-85h9n" event={"ID":"121cce9d-e190-44bf-b332-7b268c2ffd26","Type":"ContainerStarted","Data":"fc24750da549e6c2ecda9ce32aebf89bd86b8baab0b598527a51c4b2cef572b8"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.638147 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7cd588cc5b-pmhlg" podStartSLOduration=27.637985106 podStartE2EDuration="27.637985106s" podCreationTimestamp="2026-01-20 16:50:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:29.602828553 +0000 UTC m=+1147.847433359" watchObservedRunningTime="2026-01-20 16:50:29.637985106 +0000 UTC m=+1147.882589932" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.657235 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-shkpd" event={"ID":"eabde76f-904d-4313-8013-f17d65cc178f","Type":"ContainerStarted","Data":"654eb3f017d11bd7f624b2107af79990d8d6e127d9e87cc164d494b5f1cac8b5"} Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.703061 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-84df7dbffb-njbnq" podStartSLOduration=27.703036089 podStartE2EDuration="27.703036089s" podCreationTimestamp="2026-01-20 16:50:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:29.653716223 +0000 UTC m=+1147.898321029" watchObservedRunningTime="2026-01-20 16:50:29.703036089 +0000 UTC m=+1147.947640895" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.712424 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8dfc578d4-g6p6m" podStartSLOduration=4.712399693 podStartE2EDuration="4.712399693s" podCreationTimestamp="2026-01-20 16:50:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:29.671670059 +0000 UTC m=+1147.916274865" watchObservedRunningTime="2026-01-20 16:50:29.712399693 +0000 UTC m=+1147.957004499" Jan 20 16:50:29 crc kubenswrapper[4995]: I0120 16:50:29.721187 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-shkpd" podStartSLOduration=22.721168751 podStartE2EDuration="22.721168751s" podCreationTimestamp="2026-01-20 16:50:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:29.68977603 +0000 UTC m=+1147.934380836" watchObservedRunningTime="2026-01-20 16:50:29.721168751 +0000 UTC m=+1147.965773557" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.572606 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.573190 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.573232 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.573791 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ecef0f787bcc6c0229321b3bf04fd7a400236ca19aefa00a3e8afeb5931315b"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.573845 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://8ecef0f787bcc6c0229321b3bf04fd7a400236ca19aefa00a3e8afeb5931315b" gracePeriod=600 Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.694929 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" event={"ID":"ff583ed2-5f45-45fe-aa25-f02872f482b1","Type":"ContainerStarted","Data":"b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a"} Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.695357 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.702681 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7857b9874f-85h9n" event={"ID":"121cce9d-e190-44bf-b332-7b268c2ffd26","Type":"ContainerStarted","Data":"5606fb2f90fe83c47938c35a884480c39eea3e2fb16d06a81bb0b6d85e786378"} Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.702744 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7857b9874f-85h9n" event={"ID":"121cce9d-e190-44bf-b332-7b268c2ffd26","Type":"ContainerStarted","Data":"4c00a537ff075f867ee179c429e56070d87a2bf43410fe7dd8036e9f8b3a56b8"} Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.703556 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7857b9874f-85h9n" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.706215 4995 generic.go:334] "Generic (PLEG): container finished" podID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerID="bd2564138b3f646624f7076acd763180cf56421e8f09111dc6b09d760a394028" exitCode=0 Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.706244 4995 generic.go:334] "Generic (PLEG): container finished" podID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerID="12368189ecbbd335434012dc58eb67c1b202101506bd5323a04b5a3588742fa0" exitCode=143 Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.706287 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"2e22732f-c280-4803-b3d6-8f5a4d0ab632","Type":"ContainerDied","Data":"bd2564138b3f646624f7076acd763180cf56421e8f09111dc6b09d760a394028"} Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.706309 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2e22732f-c280-4803-b3d6-8f5a4d0ab632","Type":"ContainerDied","Data":"12368189ecbbd335434012dc58eb67c1b202101506bd5323a04b5a3588742fa0"} Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.711548 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3edee2ed-6825-49d5-9556-a33a54331f20","Type":"ContainerStarted","Data":"285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c"} Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.770736 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" podStartSLOduration=5.77071993 podStartE2EDuration="5.77071993s" podCreationTimestamp="2026-01-20 16:50:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:30.737579653 +0000 UTC m=+1148.982184459" watchObservedRunningTime="2026-01-20 16:50:30.77071993 +0000 UTC m=+1149.015324736" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.772296 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7857b9874f-85h9n" podStartSLOduration=2.772290423 podStartE2EDuration="2.772290423s" podCreationTimestamp="2026-01-20 16:50:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:30.765265763 +0000 UTC m=+1149.009870579" watchObservedRunningTime="2026-01-20 16:50:30.772290423 +0000 UTC m=+1149.016895219" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.791258 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=27.791237067 podStartE2EDuration="27.791237067s" podCreationTimestamp="2026-01-20 16:50:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:30.788572055 +0000 UTC m=+1149.033176861" watchObservedRunningTime="2026-01-20 16:50:30.791237067 +0000 UTC m=+1149.035841873" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.851306 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994488 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-logs\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994547 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-httpd-run\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994620 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-scripts\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994656 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6qgw\" (UniqueName: \"kubernetes.io/projected/2e22732f-c280-4803-b3d6-8f5a4d0ab632-kube-api-access-z6qgw\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994699 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-public-tls-certs\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994817 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-combined-ca-bundle\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994905 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.994961 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-config-data\") pod \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\" (UID: \"2e22732f-c280-4803-b3d6-8f5a4d0ab632\") " Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.995396 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-logs" (OuterVolumeSpecName: "logs") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.995422 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.996155 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:30 crc kubenswrapper[4995]: I0120 16:50:30.996182 4995 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e22732f-c280-4803-b3d6-8f5a4d0ab632-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.014701 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.015012 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-scripts" (OuterVolumeSpecName: "scripts") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.020802 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e22732f-c280-4803-b3d6-8f5a4d0ab632-kube-api-access-z6qgw" (OuterVolumeSpecName: "kube-api-access-z6qgw") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "kube-api-access-z6qgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.064236 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.098506 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.098546 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.098559 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6qgw\" (UniqueName: \"kubernetes.io/projected/2e22732f-c280-4803-b3d6-8f5a4d0ab632-kube-api-access-z6qgw\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.098571 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.132297 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.151355 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-config-data" (OuterVolumeSpecName: "config-data") pod "2e22732f-c280-4803-b3d6-8f5a4d0ab632" (UID: "2e22732f-c280-4803-b3d6-8f5a4d0ab632"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.162250 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.211620 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.211647 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.211658 4995 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e22732f-c280-4803-b3d6-8f5a4d0ab632-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.723804 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="8ecef0f787bcc6c0229321b3bf04fd7a400236ca19aefa00a3e8afeb5931315b" exitCode=0 Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.723873 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"8ecef0f787bcc6c0229321b3bf04fd7a400236ca19aefa00a3e8afeb5931315b"} Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.724135 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"e4ec06a3af3d63376517d75d9eacbb252d52f03f8933ff215b7181152846db60"} Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.724153 4995 scope.go:117] "RemoveContainer" containerID="ef514ad170d2e1a38aa428bd4835a847c0ca2074e5ec7e7cc5427ce30e0cd1ed" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.731173 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2e22732f-c280-4803-b3d6-8f5a4d0ab632","Type":"ContainerDied","Data":"3f2d0a67feac908f2bf6727767a7d4f3eb9152d996f81e088243c20f92493a60"} Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.731228 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.788888 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.836095 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.856874 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:31 crc kubenswrapper[4995]: E0120 16:50:31.857254 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-httpd" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.857271 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-httpd" Jan 20 16:50:31 crc kubenswrapper[4995]: E0120 16:50:31.857285 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-log" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.857291 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-log" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.857460 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-httpd" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.857476 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" containerName="glance-log" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.858371 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.858707 4995 scope.go:117] "RemoveContainer" containerID="bd2564138b3f646624f7076acd763180cf56421e8f09111dc6b09d760a394028" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.862697 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.869951 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.873102 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:31 crc kubenswrapper[4995]: I0120 16:50:31.940271 4995 scope.go:117] "RemoveContainer" containerID="12368189ecbbd335434012dc58eb67c1b202101506bd5323a04b5a3588742fa0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.023209 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e22732f-c280-4803-b3d6-8f5a4d0ab632" path="/var/lib/kubelet/pods/2e22732f-c280-4803-b3d6-8f5a4d0ab632/volumes" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042132 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t246w\" (UniqueName: \"kubernetes.io/projected/c66738b7-dac3-4487-b128-215f8e2eb48f-kube-api-access-t246w\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042171 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042198 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-logs\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042217 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042236 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042283 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: 
\"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042335 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-scripts\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.042355 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-config-data\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144281 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144406 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-scripts\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144428 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-config-data\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144522 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t246w\" (UniqueName: \"kubernetes.io/projected/c66738b7-dac3-4487-b128-215f8e2eb48f-kube-api-access-t246w\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144542 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144570 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-logs\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144590 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " 
pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.144632 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.145315 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.148008 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-logs\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.148679 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.156738 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.157531 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-scripts\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.158436 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.158576 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-config-data\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.195462 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t246w\" (UniqueName: \"kubernetes.io/projected/c66738b7-dac3-4487-b128-215f8e2eb48f-kube-api-access-t246w\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.228368 
4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.493773 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.741938 4995 generic.go:334] "Generic (PLEG): container finished" podID="40362747-51a2-473e-845c-3427003b9b7a" containerID="cc4db8b534ebf1fe6b4679e9246066f633e666f3cf0eb0bc1048bccf4a0334d1" exitCode=0 Jan 20 16:50:32 crc kubenswrapper[4995]: I0120 16:50:32.742260 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4zjlv" event={"ID":"40362747-51a2-473e-845c-3427003b9b7a","Type":"ContainerDied","Data":"cc4db8b534ebf1fe6b4679e9246066f633e666f3cf0eb0bc1048bccf4a0334d1"} Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.114527 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.114914 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.212161 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.309343 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.309400 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7cd588cc5b-pmhlg" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.529235 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.530405 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.530564 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.530615 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.586647 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.616235 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.769740 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c66738b7-dac3-4487-b128-215f8e2eb48f","Type":"ContainerStarted","Data":"397c15fbb1de49ccf09d0dd2ab62becd0ac14e7f380c22aefc223a9d27683b0a"} Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.771735 4995 generic.go:334] "Generic (PLEG): container finished" podID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" 
containerID="29bf869aa9c0b173a588767f8be2c70c46b4d38c9d6f32d66064158343dde75b" exitCode=0 Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.771791 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-ncfj6" event={"ID":"e5603781-3cf3-41db-bfc7-7dc74d244fd4","Type":"ContainerDied","Data":"29bf869aa9c0b173a588767f8be2c70c46b4d38c9d6f32d66064158343dde75b"} Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.774462 4995 generic.go:334] "Generic (PLEG): container finished" podID="eabde76f-904d-4313-8013-f17d65cc178f" containerID="654eb3f017d11bd7f624b2107af79990d8d6e127d9e87cc164d494b5f1cac8b5" exitCode=0 Jan 20 16:50:33 crc kubenswrapper[4995]: I0120 16:50:33.774520 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-shkpd" event={"ID":"eabde76f-904d-4313-8013-f17d65cc178f","Type":"ContainerDied","Data":"654eb3f017d11bd7f624b2107af79990d8d6e127d9e87cc164d494b5f1cac8b5"} Jan 20 16:50:34 crc kubenswrapper[4995]: I0120 16:50:34.833667 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c66738b7-dac3-4487-b128-215f8e2eb48f","Type":"ContainerStarted","Data":"1daf20f0c67b095f761b9f1f0f0a04598d305aeda734d3601e0911d7243afb73"} Jan 20 16:50:35 crc kubenswrapper[4995]: I0120 16:50:35.163095 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:50:36 crc kubenswrapper[4995]: I0120 16:50:36.219278 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:50:36 crc kubenswrapper[4995]: I0120 16:50:36.292454 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-fcjgj"] Jan 20 16:50:36 crc kubenswrapper[4995]: I0120 16:50:36.292731 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerName="dnsmasq-dns" containerID="cri-o://b1bb280977cc756f6c7053ebf4ed7c9d3f5f773859d6744c11e27060c89d7b06" gracePeriod=10 Jan 20 16:50:36 crc kubenswrapper[4995]: I0120 16:50:36.365249 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:36 crc kubenswrapper[4995]: I0120 16:50:36.861475 4995 generic.go:334] "Generic (PLEG): container finished" podID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerID="b1bb280977cc756f6c7053ebf4ed7c9d3f5f773859d6744c11e27060c89d7b06" exitCode=0 Jan 20 16:50:36 crc kubenswrapper[4995]: I0120 16:50:36.861521 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" event={"ID":"f12a8420-f199-4df8-8e65-66fe1a2d9fce","Type":"ContainerDied","Data":"b1bb280977cc756f6c7053ebf4ed7c9d3f5f773859d6744c11e27060c89d7b06"} Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.094006 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-4zjlv" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.104126 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.121240 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249576 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-config-data\") pod \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249830 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fl6p\" (UniqueName: \"kubernetes.io/projected/40362747-51a2-473e-845c-3427003b9b7a-kube-api-access-6fl6p\") pod \"40362747-51a2-473e-845c-3427003b9b7a\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249882 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8lj4\" (UniqueName: \"kubernetes.io/projected/eabde76f-904d-4313-8013-f17d65cc178f-kube-api-access-f8lj4\") pod \"eabde76f-904d-4313-8013-f17d65cc178f\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249899 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-db-sync-config-data\") pod \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249936 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-fernet-keys\") pod \"eabde76f-904d-4313-8013-f17d65cc178f\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249960 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-scripts\") pod \"eabde76f-904d-4313-8013-f17d65cc178f\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.249998 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-scripts\") pod \"40362747-51a2-473e-845c-3427003b9b7a\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250053 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-config-data\") pod \"eabde76f-904d-4313-8013-f17d65cc178f\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250102 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdfl8\" (UniqueName: \"kubernetes.io/projected/e5603781-3cf3-41db-bfc7-7dc74d244fd4-kube-api-access-zdfl8\") pod \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250141 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40362747-51a2-473e-845c-3427003b9b7a-logs\") pod \"40362747-51a2-473e-845c-3427003b9b7a\" (UID: 
\"40362747-51a2-473e-845c-3427003b9b7a\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250176 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-combined-ca-bundle\") pod \"40362747-51a2-473e-845c-3427003b9b7a\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250195 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-credential-keys\") pod \"eabde76f-904d-4313-8013-f17d65cc178f\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250210 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-combined-ca-bundle\") pod \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\" (UID: \"e5603781-3cf3-41db-bfc7-7dc74d244fd4\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250275 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-config-data\") pod \"40362747-51a2-473e-845c-3427003b9b7a\" (UID: \"40362747-51a2-473e-845c-3427003b9b7a\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.250290 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-combined-ca-bundle\") pod \"eabde76f-904d-4313-8013-f17d65cc178f\" (UID: \"eabde76f-904d-4313-8013-f17d65cc178f\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.258298 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40362747-51a2-473e-845c-3427003b9b7a-logs" (OuterVolumeSpecName: "logs") pod "40362747-51a2-473e-845c-3427003b9b7a" (UID: "40362747-51a2-473e-845c-3427003b9b7a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.261931 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "eabde76f-904d-4313-8013-f17d65cc178f" (UID: "eabde76f-904d-4313-8013-f17d65cc178f"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.266497 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eabde76f-904d-4313-8013-f17d65cc178f-kube-api-access-f8lj4" (OuterVolumeSpecName: "kube-api-access-f8lj4") pod "eabde76f-904d-4313-8013-f17d65cc178f" (UID: "eabde76f-904d-4313-8013-f17d65cc178f"). InnerVolumeSpecName "kube-api-access-f8lj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.271235 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40362747-51a2-473e-845c-3427003b9b7a-kube-api-access-6fl6p" (OuterVolumeSpecName: "kube-api-access-6fl6p") pod "40362747-51a2-473e-845c-3427003b9b7a" (UID: "40362747-51a2-473e-845c-3427003b9b7a"). InnerVolumeSpecName "kube-api-access-6fl6p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.279463 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-scripts" (OuterVolumeSpecName: "scripts") pod "eabde76f-904d-4313-8013-f17d65cc178f" (UID: "eabde76f-904d-4313-8013-f17d65cc178f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.280905 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "eabde76f-904d-4313-8013-f17d65cc178f" (UID: "eabde76f-904d-4313-8013-f17d65cc178f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.305312 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-scripts" (OuterVolumeSpecName: "scripts") pod "40362747-51a2-473e-845c-3427003b9b7a" (UID: "40362747-51a2-473e-845c-3427003b9b7a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.305402 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e5603781-3cf3-41db-bfc7-7dc74d244fd4" (UID: "e5603781-3cf3-41db-bfc7-7dc74d244fd4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.322015 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.331448 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5603781-3cf3-41db-bfc7-7dc74d244fd4-kube-api-access-zdfl8" (OuterVolumeSpecName: "kube-api-access-zdfl8") pod "e5603781-3cf3-41db-bfc7-7dc74d244fd4" (UID: "e5603781-3cf3-41db-bfc7-7dc74d244fd4"). InnerVolumeSpecName "kube-api-access-zdfl8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351687 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8lj4\" (UniqueName: \"kubernetes.io/projected/eabde76f-904d-4313-8013-f17d65cc178f-kube-api-access-f8lj4\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351828 4995 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351838 4995 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351847 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351855 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351863 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdfl8\" (UniqueName: \"kubernetes.io/projected/e5603781-3cf3-41db-bfc7-7dc74d244fd4-kube-api-access-zdfl8\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351870 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40362747-51a2-473e-845c-3427003b9b7a-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351877 4995 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.351885 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fl6p\" (UniqueName: \"kubernetes.io/projected/40362747-51a2-473e-845c-3427003b9b7a-kube-api-access-6fl6p\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.362272 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5603781-3cf3-41db-bfc7-7dc74d244fd4" (UID: "e5603781-3cf3-41db-bfc7-7dc74d244fd4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.370213 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eabde76f-904d-4313-8013-f17d65cc178f" (UID: "eabde76f-904d-4313-8013-f17d65cc178f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.383158 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40362747-51a2-473e-845c-3427003b9b7a" (UID: "40362747-51a2-473e-845c-3427003b9b7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.384826 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-config-data" (OuterVolumeSpecName: "config-data") pod "40362747-51a2-473e-845c-3427003b9b7a" (UID: "40362747-51a2-473e-845c-3427003b9b7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.384924 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-config-data" (OuterVolumeSpecName: "config-data") pod "eabde76f-904d-4313-8013-f17d65cc178f" (UID: "eabde76f-904d-4313-8013-f17d65cc178f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.403722 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-config-data" (OuterVolumeSpecName: "config-data") pod "e5603781-3cf3-41db-bfc7-7dc74d244fd4" (UID: "e5603781-3cf3-41db-bfc7-7dc74d244fd4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.453515 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.453545 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.453556 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.453565 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40362747-51a2-473e-845c-3427003b9b7a-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.453575 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eabde76f-904d-4313-8013-f17d65cc178f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.453582 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5603781-3cf3-41db-bfc7-7dc74d244fd4-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.615436 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.761439 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-nb\") pod \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.761752 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-config\") pod \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.761804 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-sb\") pod \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.761821 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-svc\") pod \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.761889 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-swift-storage-0\") pod \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.761968 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4v4x\" (UniqueName: \"kubernetes.io/projected/f12a8420-f199-4df8-8e65-66fe1a2d9fce-kube-api-access-g4v4x\") pod \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\" (UID: \"f12a8420-f199-4df8-8e65-66fe1a2d9fce\") " Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.774560 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f12a8420-f199-4df8-8e65-66fe1a2d9fce-kube-api-access-g4v4x" (OuterVolumeSpecName: "kube-api-access-g4v4x") pod "f12a8420-f199-4df8-8e65-66fe1a2d9fce" (UID: "f12a8420-f199-4df8-8e65-66fe1a2d9fce"). InnerVolumeSpecName "kube-api-access-g4v4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.866502 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4v4x\" (UniqueName: \"kubernetes.io/projected/f12a8420-f199-4df8-8e65-66fe1a2d9fce-kube-api-access-g4v4x\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.883161 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f12a8420-f199-4df8-8e65-66fe1a2d9fce" (UID: "f12a8420-f199-4df8-8e65-66fe1a2d9fce"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.883734 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-config" (OuterVolumeSpecName: "config") pod "f12a8420-f199-4df8-8e65-66fe1a2d9fce" (UID: "f12a8420-f199-4df8-8e65-66fe1a2d9fce"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.884654 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f12a8420-f199-4df8-8e65-66fe1a2d9fce" (UID: "f12a8420-f199-4df8-8e65-66fe1a2d9fce"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.885002 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f12a8420-f199-4df8-8e65-66fe1a2d9fce" (UID: "f12a8420-f199-4df8-8e65-66fe1a2d9fce"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.891464 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" event={"ID":"f12a8420-f199-4df8-8e65-66fe1a2d9fce","Type":"ContainerDied","Data":"7d5c84c72d48c2f60d942da043d8a24bfdaaed224c0ced1ce18559898170020b"} Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.891521 4995 scope.go:117] "RemoveContainer" containerID="b1bb280977cc756f6c7053ebf4ed7c9d3f5f773859d6744c11e27060c89d7b06" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.891655 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-fcjgj" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.893254 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f12a8420-f199-4df8-8e65-66fe1a2d9fce" (UID: "f12a8420-f199-4df8-8e65-66fe1a2d9fce"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.920613 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-4zjlv" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.920635 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-4zjlv" event={"ID":"40362747-51a2-473e-845c-3427003b9b7a","Type":"ContainerDied","Data":"c6bf705a603203aaad7336df23f3ec0e72f4a1e2371f2c8b89365df5f4fd0ee8"} Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.920672 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6bf705a603203aaad7336df23f3ec0e72f4a1e2371f2c8b89365df5f4fd0ee8" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.935177 4995 scope.go:117] "RemoveContainer" containerID="2b8dc37efe02950c324e009629b2907d0457b29306257e011700729b69d5ba67" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.944771 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-ncfj6" event={"ID":"e5603781-3cf3-41db-bfc7-7dc74d244fd4","Type":"ContainerDied","Data":"f77b9403f8afb7551803eaf49a03d040951c15202bf4652cb444daa03b5f6302"} Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.944802 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f77b9403f8afb7551803eaf49a03d040951c15202bf4652cb444daa03b5f6302" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.944863 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-ncfj6" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.954061 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-shkpd" event={"ID":"eabde76f-904d-4313-8013-f17d65cc178f","Type":"ContainerDied","Data":"6690d34bf09b8943ba39972a77aa1e2a22aa355beac922a1204b8e76d5dae046"} Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.954117 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6690d34bf09b8943ba39972a77aa1e2a22aa355beac922a1204b8e76d5dae046" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.954182 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-shkpd" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.968715 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.968741 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.968750 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.968758 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:37 crc kubenswrapper[4995]: I0120 16:50:37.968767 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f12a8420-f199-4df8-8e65-66fe1a2d9fce-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209334 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-679d748c9b-mrbbx"] Jan 20 16:50:38 crc kubenswrapper[4995]: E0120 16:50:38.209676 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eabde76f-904d-4313-8013-f17d65cc178f" containerName="keystone-bootstrap" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209692 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="eabde76f-904d-4313-8013-f17d65cc178f" containerName="keystone-bootstrap" Jan 20 16:50:38 crc kubenswrapper[4995]: E0120 16:50:38.209704 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40362747-51a2-473e-845c-3427003b9b7a" containerName="placement-db-sync" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209710 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="40362747-51a2-473e-845c-3427003b9b7a" containerName="placement-db-sync" Jan 20 16:50:38 crc kubenswrapper[4995]: E0120 16:50:38.209722 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerName="init" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209728 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerName="init" Jan 20 16:50:38 crc kubenswrapper[4995]: E0120 16:50:38.209739 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" containerName="watcher-db-sync" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209745 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" containerName="watcher-db-sync" Jan 20 16:50:38 crc kubenswrapper[4995]: E0120 16:50:38.209761 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerName="dnsmasq-dns" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209767 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerName="dnsmasq-dns" Jan 20 16:50:38 crc kubenswrapper[4995]: 
I0120 16:50:38.209926 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="eabde76f-904d-4313-8013-f17d65cc178f" containerName="keystone-bootstrap" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209939 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="40362747-51a2-473e-845c-3427003b9b7a" containerName="placement-db-sync" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209952 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" containerName="watcher-db-sync" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.209970 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" containerName="dnsmasq-dns" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.210824 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.231703 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.232061 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.232139 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.233489 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8sfqz" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.233667 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.266227 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-fcjgj"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.292630 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-combined-ca-bundle\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.292881 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-public-tls-certs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.292927 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-logs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.292966 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-scripts\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" 
Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.292991 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-config-data\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.293029 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvj4c\" (UniqueName: \"kubernetes.io/projected/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-kube-api-access-tvj4c\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.293066 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-internal-tls-certs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.305899 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-679d748c9b-mrbbx"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.313991 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-fcjgj"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.394976 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-logs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395049 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-scripts\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395097 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-config-data\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395157 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvj4c\" (UniqueName: \"kubernetes.io/projected/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-kube-api-access-tvj4c\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395215 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-internal-tls-certs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395262 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-combined-ca-bundle\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395296 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-public-tls-certs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.395534 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-logs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.403046 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-config-data\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.404546 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-combined-ca-bundle\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.412607 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-public-tls-certs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.412959 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-internal-tls-certs\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.413652 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-scripts\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.420874 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvj4c\" (UniqueName: \"kubernetes.io/projected/ff10efe7-680b-4d4a-a950-e2a7dfbd24a1-kube-api-access-tvj4c\") pod \"placement-679d748c9b-mrbbx\" (UID: \"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1\") " pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.462891 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7f99b88f98-w6ztm"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.464031 4995 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.474513 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.474827 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-m8prt" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.474947 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.475147 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.475996 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.481785 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7f99b88f98-w6ztm"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.481854 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.538482 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600025 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-internal-tls-certs\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600091 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-credential-keys\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600143 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-public-tls-certs\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600178 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-combined-ca-bundle\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600204 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-fernet-keys\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600238 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhzr6\" (UniqueName: \"kubernetes.io/projected/d2343e74-3182-46e7-b4d2-7d9c35964fab-kube-api-access-xhzr6\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600269 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-scripts\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.600295 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-config-data\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703054 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703174 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-public-tls-certs\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703233 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-combined-ca-bundle\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703258 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-fernet-keys\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703291 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhzr6\" (UniqueName: \"kubernetes.io/projected/d2343e74-3182-46e7-b4d2-7d9c35964fab-kube-api-access-xhzr6\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703326 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-scripts\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703355 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-config-data\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc 
kubenswrapper[4995]: I0120 16:50:38.703393 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-internal-tls-certs\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.703413 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-credential-keys\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.704297 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.717690 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-credential-keys\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.718157 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.721115 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-gd6b5" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.722656 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-fernet-keys\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.722746 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-config-data\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.728354 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.734696 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-combined-ca-bundle\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.735022 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-public-tls-certs\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.735534 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-internal-tls-certs\") pod 
\"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.752753 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhzr6\" (UniqueName: \"kubernetes.io/projected/d2343e74-3182-46e7-b4d2-7d9c35964fab-kube-api-access-xhzr6\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.762405 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d2343e74-3182-46e7-b4d2-7d9c35964fab-scripts\") pod \"keystone-7f99b88f98-w6ztm\" (UID: \"d2343e74-3182-46e7-b4d2-7d9c35964fab\") " pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.798904 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.804954 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe959b99-aa94-41d7-aefa-6e6803a337cf-logs\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.805046 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msccv\" (UniqueName: \"kubernetes.io/projected/fe959b99-aa94-41d7-aefa-6e6803a337cf-kube-api-access-msccv\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.805185 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe959b99-aa94-41d7-aefa-6e6803a337cf-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.805235 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe959b99-aa94-41d7-aefa-6e6803a337cf-config-data\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.842534 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.843975 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.860502 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.877772 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906596 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe959b99-aa94-41d7-aefa-6e6803a337cf-logs\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906678 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msccv\" (UniqueName: \"kubernetes.io/projected/fe959b99-aa94-41d7-aefa-6e6803a337cf-kube-api-access-msccv\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906742 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906764 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-config-data\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906792 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mnh7\" (UniqueName: \"kubernetes.io/projected/e4fe40d8-90bd-4206-a304-91c67e90d8c0-kube-api-access-5mnh7\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906818 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe959b99-aa94-41d7-aefa-6e6803a337cf-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906844 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe959b99-aa94-41d7-aefa-6e6803a337cf-config-data\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906872 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.906895 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/e4fe40d8-90bd-4206-a304-91c67e90d8c0-logs\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.907279 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe959b99-aa94-41d7-aefa-6e6803a337cf-logs\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.913814 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe959b99-aa94-41d7-aefa-6e6803a337cf-config-data\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.917272 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe959b99-aa94-41d7-aefa-6e6803a337cf-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.923620 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.925727 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.946144 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.946278 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msccv\" (UniqueName: \"kubernetes.io/projected/fe959b99-aa94-41d7-aefa-6e6803a337cf-kube-api-access-msccv\") pod \"watcher-applier-0\" (UID: \"fe959b99-aa94-41d7-aefa-6e6803a337cf\") " pod="openstack/watcher-applier-0" Jan 20 16:50:38 crc kubenswrapper[4995]: I0120 16:50:38.963324 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009705 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-config-data\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009764 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009805 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009830 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-config-data\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009862 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mnh7\" (UniqueName: \"kubernetes.io/projected/e4fe40d8-90bd-4206-a304-91c67e90d8c0-kube-api-access-5mnh7\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009883 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009932 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009952 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4fe40d8-90bd-4206-a304-91c67e90d8c0-logs\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.009967 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ac1d023-57b0-4926-9511-200b094b70f7-logs\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.010001 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5dz4\" (UniqueName: \"kubernetes.io/projected/8ac1d023-57b0-4926-9511-200b094b70f7-kube-api-access-h5dz4\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.012104 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c66738b7-dac3-4487-b128-215f8e2eb48f","Type":"ContainerStarted","Data":"83f8aa16c6e0c99d44dc716b5a2f98a5b1ea1941246e20e15e580378bfb33b78"} Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.013554 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4fe40d8-90bd-4206-a304-91c67e90d8c0-logs\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.018187 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 
16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.018419 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-config-data\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.027738 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.053225 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mnh7\" (UniqueName: \"kubernetes.io/projected/e4fe40d8-90bd-4206-a304-91c67e90d8c0-kube-api-access-5mnh7\") pod \"watcher-api-0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.102440 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.10241878 podStartE2EDuration="8.10241878s" podCreationTimestamp="2026-01-20 16:50:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:39.101714001 +0000 UTC m=+1157.346318817" watchObservedRunningTime="2026-01-20 16:50:39.10241878 +0000 UTC m=+1157.347023586" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.111944 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.112244 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ac1d023-57b0-4926-9511-200b094b70f7-logs\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.112431 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5dz4\" (UniqueName: \"kubernetes.io/projected/8ac1d023-57b0-4926-9511-200b094b70f7-kube-api-access-h5dz4\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.112654 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-config-data\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.112802 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 
crc kubenswrapper[4995]: I0120 16:50:39.118885 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.122963 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ac1d023-57b0-4926-9511-200b094b70f7-logs\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.127853 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-config-data\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.132792 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.145821 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5dz4\" (UniqueName: \"kubernetes.io/projected/8ac1d023-57b0-4926-9511-200b094b70f7-kube-api-access-h5dz4\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.157927 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.160826 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.256772 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 20 16:50:39 crc kubenswrapper[4995]: I0120 16:50:39.588513 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 16:50:40 crc kubenswrapper[4995]: I0120 16:50:40.000769 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f12a8420-f199-4df8-8e65-66fe1a2d9fce" path="/var/lib/kubelet/pods/f12a8420-f199-4df8-8e65-66fe1a2d9fce/volumes" Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.240996 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-679d748c9b-mrbbx"] Jan 20 16:50:42 crc kubenswrapper[4995]: W0120 16:50:42.257320 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff10efe7_680b_4d4a_a950_e2a7dfbd24a1.slice/crio-9fca210fe49083281a91a31b90f8f58cb709e989f6dcaa565b7516ece91be125 WatchSource:0}: Error finding container 9fca210fe49083281a91a31b90f8f58cb709e989f6dcaa565b7516ece91be125: Status 404 returned error can't find the container with id 9fca210fe49083281a91a31b90f8f58cb709e989f6dcaa565b7516ece91be125 Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.287512 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Jan 20 16:50:42 crc kubenswrapper[4995]: W0120 16:50:42.407616 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ac1d023_57b0_4926_9511_200b094b70f7.slice/crio-98ad81168bda289883ff86f8e01a947191004e00f718d674838d35ecf0af3134 WatchSource:0}: Error finding container 98ad81168bda289883ff86f8e01a947191004e00f718d674838d35ecf0af3134: Status 404 returned error can't find the container with id 98ad81168bda289883ff86f8e01a947191004e00f718d674838d35ecf0af3134 Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.488641 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.507263 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.507299 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.521791 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7f99b88f98-w6ztm"] Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.551961 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.643389 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:50:42 crc kubenswrapper[4995]: I0120 16:50:42.648187 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.122049 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.139255 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e4fe40d8-90bd-4206-a304-91c67e90d8c0","Type":"ContainerStarted","Data":"287777f6ea29127ae0c036a791958f189d726ed2cb8f4828480e642fd9ecb222"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.139267 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e4fe40d8-90bd-4206-a304-91c67e90d8c0","Type":"ContainerStarted","Data":"a5883c5d7b4ea9192cb096679baa86e3276cf1f13aa725092778fa431c1f7269"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.141199 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.145265 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": dial tcp 10.217.0.173:9322: connect: connection refused"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.149298 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7f99b88f98-w6ztm" event={"ID":"d2343e74-3182-46e7-b4d2-7d9c35964fab","Type":"ContainerStarted","Data":"2d1ba60d5c5c9cfe274ab38dc9857e21946496a3fc8d1008eee19934f5eaff2e"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.149340 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7f99b88f98-w6ztm" event={"ID":"d2343e74-3182-46e7-b4d2-7d9c35964fab","Type":"ContainerStarted","Data":"347d8e67b0d9a0467f95373dafcdc34e585a8887c530e5cd5d4b8a6626e0fa3c"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.150164 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7f99b88f98-w6ztm"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.159334 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-679d748c9b-mrbbx" event={"ID":"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1","Type":"ContainerStarted","Data":"de8c815ad5d4912b6cd26090ec61ea232da36b7e91965ea0616d71edf1bd88e0"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.159375 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-679d748c9b-mrbbx" event={"ID":"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1","Type":"ContainerStarted","Data":"9fca210fe49083281a91a31b90f8f58cb709e989f6dcaa565b7516ece91be125"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.159409 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-679d748c9b-mrbbx"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.159430 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-679d748c9b-mrbbx"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.177726 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=5.17770998 podStartE2EDuration="5.17770998s" podCreationTimestamp="2026-01-20 16:50:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:43.177394462 +0000 UTC m=+1161.421999268" watchObservedRunningTime="2026-01-20 16:50:43.17770998 +0000 UTC m=+1161.422314786"
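The pod_startup_latency_tracker entry that closes the block above can be checked by hand: watchObservedRunningTime minus podCreationTimestamp is 16:50:43.17770998 - 16:50:38 = 5.17770998 s, exactly the logged podStartSLOduration, and the zero-valued firstStartedPulling/lastFinishedPulling show no image pull was needed (compare barbican-db-sync-cc4hb below, where roughly 46 s of image pulling is excluded from its much smaller SLO duration). A short check of that arithmetic in Go, using the timestamp layout these entries print; the file name slodur.go is illustrative:

// slodur.go - reproduce the startup-duration arithmetic from the
// "Observed pod startup duration" entry above for watcher-api-0.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-20 16:50:38 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-20 16:50:43.177394462 +0000 UTC")
	// watchObservedRunningTime is when the tracker itself saw the update.
	watched, _ := time.Parse(layout, "2026-01-20 16:50:43.17770998 +0000 UTC")

	fmt.Println("running - created:", running.Sub(created)) // ~5.177394462s
	fmt.Println("watched - created:", watched.Sub(created)) // 5.17770998s, as logged
}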
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.181694 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"fe959b99-aa94-41d7-aefa-6e6803a337cf","Type":"ContainerStarted","Data":"dc2298dfedd3aee6197743ef312863b67ebbe6183a4eba8ae3dfd9a08dd278a2"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.190687 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerStarted","Data":"6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.192009 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cc4hb" event={"ID":"6cb81edf-880e-421a-bc37-258db15b1ad9","Type":"ContainerStarted","Data":"4a0fbaa2aa343e2358ea3cb668e556f59b7a3a76a119c69915aa9ccfa185f3fb"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.194288 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8ac1d023-57b0-4926-9511-200b094b70f7","Type":"ContainerStarted","Data":"98ad81168bda289883ff86f8e01a947191004e00f718d674838d35ecf0af3134"}
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.195126 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.195147 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.211866 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-679d748c9b-mrbbx" podStartSLOduration=5.211837255 podStartE2EDuration="5.211837255s" podCreationTimestamp="2026-01-20 16:50:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:43.202471742 +0000 UTC m=+1161.447076558" watchObservedRunningTime="2026-01-20 16:50:43.211837255 +0000 UTC m=+1161.456442051"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.245180 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7f99b88f98-w6ztm" podStartSLOduration=5.245162758 podStartE2EDuration="5.245162758s" podCreationTimestamp="2026-01-20 16:50:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:43.224962011 +0000 UTC m=+1161.469566817" watchObservedRunningTime="2026-01-20 16:50:43.245162758 +0000 UTC m=+1161.489767564"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.263761 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-cc4hb" podStartSLOduration=3.205237536 podStartE2EDuration="49.263740842s" podCreationTimestamp="2026-01-20 16:49:54 +0000 UTC" firstStartedPulling="2026-01-20 16:49:55.833895165 +0000 UTC m=+1114.078499971" lastFinishedPulling="2026-01-20 16:50:41.892398461 +0000 UTC m=+1160.137003277" observedRunningTime="2026-01-20 16:50:43.246612378 +0000 UTC m=+1161.491217184" watchObservedRunningTime="2026-01-20 16:50:43.263740842 +0000 UTC m=+1161.508345648"
Jan 20 16:50:43 crc kubenswrapper[4995]: I0120 16:50:43.314205 4995 prober.go:107]
"Probe failed" probeType="Startup" pod="openstack/horizon-7cd588cc5b-pmhlg" podUID="83a7df1c-c59a-4a4c-b34d-df9fc6711aea" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused" Jan 20 16:50:44 crc kubenswrapper[4995]: I0120 16:50:44.165691 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 20 16:50:44 crc kubenswrapper[4995]: I0120 16:50:44.228210 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-679d748c9b-mrbbx" event={"ID":"ff10efe7-680b-4d4a-a950-e2a7dfbd24a1","Type":"ContainerStarted","Data":"83727714787ce15229b387099578460276a888dc12bf025176bd49e9b162a7eb"} Jan 20 16:50:44 crc kubenswrapper[4995]: I0120 16:50:44.239003 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hmlm7" event={"ID":"53794c82-829c-4b77-b902-01be2130f0b8","Type":"ContainerStarted","Data":"9ec7aac2e8e94c48d769d6accde7419a00a7c797d5510c341e670ed955d52888"} Jan 20 16:50:44 crc kubenswrapper[4995]: I0120 16:50:44.274227 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-hmlm7" podStartSLOduration=5.100592438 podStartE2EDuration="51.274129051s" podCreationTimestamp="2026-01-20 16:49:53 +0000 UTC" firstStartedPulling="2026-01-20 16:49:55.832756335 +0000 UTC m=+1114.077361141" lastFinishedPulling="2026-01-20 16:50:42.006292948 +0000 UTC m=+1160.250897754" observedRunningTime="2026-01-20 16:50:44.270764589 +0000 UTC m=+1162.515369415" watchObservedRunningTime="2026-01-20 16:50:44.274129051 +0000 UTC m=+1162.518733857" Jan 20 16:50:45 crc kubenswrapper[4995]: I0120 16:50:45.285576 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:50:45 crc kubenswrapper[4995]: I0120 16:50:45.287203 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:50:45 crc kubenswrapper[4995]: I0120 16:50:45.287231 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:50:45 crc kubenswrapper[4995]: I0120 16:50:45.563113 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.302585 4995 generic.go:334] "Generic (PLEG): container finished" podID="6cb81edf-880e-421a-bc37-258db15b1ad9" containerID="4a0fbaa2aa343e2358ea3cb668e556f59b7a3a76a119c69915aa9ccfa185f3fb" exitCode=0 Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.302643 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cc4hb" event={"ID":"6cb81edf-880e-421a-bc37-258db15b1ad9","Type":"ContainerDied","Data":"4a0fbaa2aa343e2358ea3cb668e556f59b7a3a76a119c69915aa9ccfa185f3fb"} Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.316402 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8ac1d023-57b0-4926-9511-200b094b70f7","Type":"ContainerStarted","Data":"ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc"} Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.331816 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"fe959b99-aa94-41d7-aefa-6e6803a337cf","Type":"ContainerStarted","Data":"1d52c7817cecb59c6245a822634f70c77670bd51bb9e08c6cf1879995b2ef2c4"} Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 
16:50:46.331892 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.334448 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=5.621161807 podStartE2EDuration="8.33443217s" podCreationTimestamp="2026-01-20 16:50:38 +0000 UTC" firstStartedPulling="2026-01-20 16:50:42.504497858 +0000 UTC m=+1160.749102664" lastFinishedPulling="2026-01-20 16:50:45.217768221 +0000 UTC m=+1163.462373027" observedRunningTime="2026-01-20 16:50:46.33366794 +0000 UTC m=+1164.578272756" watchObservedRunningTime="2026-01-20 16:50:46.33443217 +0000 UTC m=+1164.579036976"
Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.359199 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=5.441873049 podStartE2EDuration="8.359181281s" podCreationTimestamp="2026-01-20 16:50:38 +0000 UTC" firstStartedPulling="2026-01-20 16:50:42.29420067 +0000 UTC m=+1160.538805476" lastFinishedPulling="2026-01-20 16:50:45.211508902 +0000 UTC m=+1163.456113708" observedRunningTime="2026-01-20 16:50:46.352249393 +0000 UTC m=+1164.596854209" watchObservedRunningTime="2026-01-20 16:50:46.359181281 +0000 UTC m=+1164.603786087"
Jan 20 16:50:46 crc kubenswrapper[4995]: I0120 16:50:46.725741 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.574489 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.765478 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cc4hb"
Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.851470 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-db-sync-config-data\") pod \"6cb81edf-880e-421a-bc37-258db15b1ad9\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") "
Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.851605 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-combined-ca-bundle\") pod \"6cb81edf-880e-421a-bc37-258db15b1ad9\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") "
Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.851709 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjdrq\" (UniqueName: \"kubernetes.io/projected/6cb81edf-880e-421a-bc37-258db15b1ad9-kube-api-access-pjdrq\") pod \"6cb81edf-880e-421a-bc37-258db15b1ad9\" (UID: \"6cb81edf-880e-421a-bc37-258db15b1ad9\") "
Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.860619 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cb81edf-880e-421a-bc37-258db15b1ad9-kube-api-access-pjdrq" (OuterVolumeSpecName: "kube-api-access-pjdrq") pod "6cb81edf-880e-421a-bc37-258db15b1ad9" (UID: "6cb81edf-880e-421a-bc37-258db15b1ad9"). InnerVolumeSpecName "kube-api-access-pjdrq". PluginName "kubernetes.io/projected", VolumeGidValue ""
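barbican-db-sync-cc4hb is a one-shot job: its container finished with exitCode=0 a few entries earlier, so instead of restarting it the kubelet unmounts the pod's volumes, with each "UnmountVolume started" (reconciler_common.go:159) answered by an "UnmountVolume.TearDown succeeded" (operation_generator.go:803) and, further down, a "Volume detached" record. Below is a sketch of waiting for that terminal phase from outside the node with client-go; the namespace and pod name come from this log, while the kubeconfig path and polling interval are assumptions for illustration, not a general tool.

// waitsync.go - poll a one-shot pod until it reaches PodSucceeded,
// the state that triggers the volume teardown entries above.
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	for {
		pod, err := client.CoreV1().Pods("openstack").Get(
			context.TODO(), "barbican-db-sync-cc4hb", metav1.GetOptions{})
		if err != nil {
			panic(err)
		}
		// PodSucceeded corresponds to the db-sync container's exitCode=0;
		// the kubelet then tears the pod's volume mounts down.
		if pod.Status.Phase == corev1.PodSucceeded {
			fmt.Println("db-sync finished; kubelet will unmount its volumes")
			return
		}
		time.Sleep(2 * time.Second)
	}
}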
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.860672 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6cb81edf-880e-421a-bc37-258db15b1ad9" (UID: "6cb81edf-880e-421a-bc37-258db15b1ad9"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.893492 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cb81edf-880e-421a-bc37-258db15b1ad9" (UID: "6cb81edf-880e-421a-bc37-258db15b1ad9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.954253 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.954304 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjdrq\" (UniqueName: \"kubernetes.io/projected/6cb81edf-880e-421a-bc37-258db15b1ad9-kube-api-access-pjdrq\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:47 crc kubenswrapper[4995]: I0120 16:50:47.954321 4995 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6cb81edf-880e-421a-bc37-258db15b1ad9-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.361726 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-cc4hb" event={"ID":"6cb81edf-880e-421a-bc37-258db15b1ad9","Type":"ContainerDied","Data":"9b648be5c2d42cc9ce883c167ec3dd86c2cbe66b2173d9965233d68b19bb6690"} Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.361795 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9b648be5c2d42cc9ce883c167ec3dd86c2cbe66b2173d9965233d68b19bb6690" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.361796 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-cc4hb" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.636149 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-76684d5cb6-ln2nh"] Jan 20 16:50:48 crc kubenswrapper[4995]: E0120 16:50:48.636944 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cb81edf-880e-421a-bc37-258db15b1ad9" containerName="barbican-db-sync" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.636961 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cb81edf-880e-421a-bc37-258db15b1ad9" containerName="barbican-db-sync" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.637219 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cb81edf-880e-421a-bc37-258db15b1ad9" containerName="barbican-db-sync" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.638441 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.642005 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.642358 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-zb7rr" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.674204 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.683148 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-b68c6cc67-mvcbt"] Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.686394 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-b68c6cc67-mvcbt" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.689151 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.809824 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-76684d5cb6-ln2nh"] Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.812972 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-combined-ca-bundle\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813065 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-config-data\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813135 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mf88\" (UniqueName: \"kubernetes.io/projected/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-kube-api-access-7mf88\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813192 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-config-data-custom\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813241 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rk247\" (UniqueName: \"kubernetes.io/projected/82750a54-2446-49e7-8251-7ae6f228dc49-kube-api-access-rk247\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813294 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-logs\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813339 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82750a54-2446-49e7-8251-7ae6f228dc49-logs\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813362 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-config-data\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-combined-ca-bundle\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.813420 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-config-data-custom\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.856163 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-b68c6cc67-mvcbt"] Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.911989 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-qm6ct"] Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.913641 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920747 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-combined-ca-bundle\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920821 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-config-data\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920859 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mf88\" (UniqueName: \"kubernetes.io/projected/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-kube-api-access-7mf88\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920902 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-config-data-custom\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920934 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rk247\" (UniqueName: \"kubernetes.io/projected/82750a54-2446-49e7-8251-7ae6f228dc49-kube-api-access-rk247\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920968 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-logs\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.920996 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82750a54-2446-49e7-8251-7ae6f228dc49-logs\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.921011 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-config-data\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.921030 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-combined-ca-bundle\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.921048 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-config-data-custom\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.938945 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-logs\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.951436 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-config-data-custom\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.951616 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-combined-ca-bundle\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.939469 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-combined-ca-bundle\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.952997 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82750a54-2446-49e7-8251-7ae6f228dc49-logs\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.961236 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mf88\" (UniqueName: \"kubernetes.io/projected/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-kube-api-access-7mf88\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.961695 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-config-data-custom\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.966287 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82750a54-2446-49e7-8251-7ae6f228dc49-config-data\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.995313 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5-config-data\") pod \"barbican-worker-b68c6cc67-mvcbt\" (UID: \"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5\") " pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:48 crc kubenswrapper[4995]: I0120 16:50:48.997707 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rk247\" (UniqueName: \"kubernetes.io/projected/82750a54-2446-49e7-8251-7ae6f228dc49-kube-api-access-rk247\") pod \"barbican-keystone-listener-76684d5cb6-ln2nh\" (UID: \"82750a54-2446-49e7-8251-7ae6f228dc49\") " pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.013984 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.017849 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-qm6ct"]
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.022739 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.023057 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-config\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.023210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-svc\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.023305 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.023399 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7tg5\" (UniqueName: \"kubernetes.io/projected/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-kube-api-access-n7tg5\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.023476 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.050649 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-77d6d89bf8-nkl4h"]
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.052194 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.055849 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.072638 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-b68c6cc67-mvcbt"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.087954 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77d6d89bf8-nkl4h"]
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.127451 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdhr8\" (UniqueName: \"kubernetes.io/projected/3ce1f610-b53c-457f-89b8-d484d02bb210-kube-api-access-tdhr8\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.134567 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data-custom\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.134766 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.134902 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-config\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.144265 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-svc\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.144515 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-combined-ca-bundle\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.144664 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.144779 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ce1f610-b53c-457f-89b8-d484d02bb210-logs\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.144882 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7tg5\" (UniqueName: \"kubernetes.io/projected/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-kube-api-access-n7tg5\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.161558 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.161713 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.141994 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.142864 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-config\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.159271 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.130126 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.161970 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.163594 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.145753 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-svc\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.164512 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.179925 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7tg5\" (UniqueName: \"kubernetes.io/projected/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-kube-api-access-n7tg5\") pod \"dnsmasq-dns-85ff748b95-qm6ct\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.185090 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.251164 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.259632 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.264348 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-combined-ca-bundle\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.264408 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ce1f610-b53c-457f-89b8-d484d02bb210-logs\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.264496 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.264582 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdhr8\" (UniqueName: \"kubernetes.io/projected/3ce1f610-b53c-457f-89b8-d484d02bb210-kube-api-access-tdhr8\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.264624 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data-custom\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.266038 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ce1f610-b53c-457f-89b8-d484d02bb210-logs\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.275219 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.277066 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-combined-ca-bundle\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.280577 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data-custom\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.292316 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdhr8\" (UniqueName: \"kubernetes.io/projected/3ce1f610-b53c-457f-89b8-d484d02bb210-kube-api-access-tdhr8\") pod \"barbican-api-77d6d89bf8-nkl4h\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.341223 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.375379 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.397639 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.397797 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.409120 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.513536 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.522912 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.725011 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-b68c6cc67-mvcbt"]
Jan 20 16:50:49 crc kubenswrapper[4995]: I0120 16:50:49.887644 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-76684d5cb6-ln2nh"]
Jan 20 16:50:49 crc kubenswrapper[4995]: E0120 16:50:49.955186 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod53794c82_829c_4b77_b902_01be2130f0b8.slice/crio-conmon-9ec7aac2e8e94c48d769d6accde7419a00a7c797d5510c341e670ed955d52888.scope\": RecentStats: unable to find data in memory cache]"
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.193646 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-qm6ct"]
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.309415 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77d6d89bf8-nkl4h"]
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.408012 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" event={"ID":"f8c780d8-786b-4f35-a3ff-22b2e9081e1d","Type":"ContainerStarted","Data":"8332ff305aef87958a889aa83d6cd73460a254d545ece41c0ba05dabf2f02c6e"}
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.411677 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b68c6cc67-mvcbt" event={"ID":"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5","Type":"ContainerStarted","Data":"e3dce5130a6e04c83f42e5e9b51edf7cddad29dc8fa0e50f640b4bed2378150e"}
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.413572 4995 generic.go:334] "Generic (PLEG): container finished" podID="53794c82-829c-4b77-b902-01be2130f0b8" containerID="9ec7aac2e8e94c48d769d6accde7419a00a7c797d5510c341e670ed955d52888" exitCode=0
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.413633 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hmlm7" event={"ID":"53794c82-829c-4b77-b902-01be2130f0b8","Type":"ContainerDied","Data":"9ec7aac2e8e94c48d769d6accde7419a00a7c797d5510c341e670ed955d52888"}
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.417398 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" event={"ID":"82750a54-2446-49e7-8251-7ae6f228dc49","Type":"ContainerStarted","Data":"3a38aea810a0d82d75a728ee196279c436ddcaa0725218093e2cb0318162f6d9"}
Jan 20 16:50:50 crc kubenswrapper[4995]: I0120 16:50:50.419058 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77d6d89bf8-nkl4h" event={"ID":"3ce1f610-b53c-457f-89b8-d484d02bb210","Type":"ContainerStarted","Data":"949cc6adf0a0fffb57086a696658d65effd378b1027923e3666f322a9be444aa"}
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.436727 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77d6d89bf8-nkl4h" event={"ID":"3ce1f610-b53c-457f-89b8-d484d02bb210","Type":"ContainerStarted","Data":"2ac38c99dfc243cd4ea3c9b9e60c3f1697ca7adfd761b65a95f3b120e0e2a962"}
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.437194 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77d6d89bf8-nkl4h" event={"ID":"3ce1f610-b53c-457f-89b8-d484d02bb210","Type":"ContainerStarted","Data":"2daad3af49862ef1f64bb065a90201dbe0da8597c8ec895043af546d54ca691b"}
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.437509 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.437534 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.448311 4995 generic.go:334] "Generic (PLEG): container finished" podID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerID="10bf7d88e3d5582b2f21c765a155109416c25575f05f0be64d87d1e33be7cfa7" exitCode=0
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.448451 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" event={"ID":"f8c780d8-786b-4f35-a3ff-22b2e9081e1d","Type":"ContainerDied","Data":"10bf7d88e3d5582b2f21c765a155109416c25575f05f0be64d87d1e33be7cfa7"}
Jan 20 16:50:51 crc kubenswrapper[4995]: I0120 16:50:51.499262 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-77d6d89bf8-nkl4h" podStartSLOduration=3.499248606 podStartE2EDuration="3.499248606s" podCreationTimestamp="2026-01-20 16:50:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:50:51.496113651 +0000 UTC m=+1169.740718457" watchObservedRunningTime="2026-01-20 16:50:51.499248606 +0000 UTC m=+1169.743853412"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.461576 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-674779b598-44vdg"]
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.466806 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.469302 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.469440 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.482156 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-674779b598-44vdg"]
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.661781 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-combined-ca-bundle\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.662086 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87360161-2c16-453b-bfeb-649cd107fdf0-logs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.662124 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-internal-tls-certs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.662154 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-public-tls-certs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.662210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-config-data\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.662240 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-config-data-custom\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.662278 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq8fv\" (UniqueName: \"kubernetes.io/projected/87360161-2c16-453b-bfeb-649cd107fdf0-kube-api-access-tq8fv\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764217 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-internal-tls-certs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764269 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-public-tls-certs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764329 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-config-data\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764359 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-config-data-custom\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764399 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq8fv\" (UniqueName: \"kubernetes.io/projected/87360161-2c16-453b-bfeb-649cd107fdf0-kube-api-access-tq8fv\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764414 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-combined-ca-bundle\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764454 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87360161-2c16-453b-bfeb-649cd107fdf0-logs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.764903 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87360161-2c16-453b-bfeb-649cd107fdf0-logs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.772763 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-config-data-custom\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.773060 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-combined-ca-bundle\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.773639 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-public-tls-certs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.773954 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-internal-tls-certs\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.775018 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87360161-2c16-453b-bfeb-649cd107fdf0-config-data\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.794735 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq8fv\" (UniqueName: \"kubernetes.io/projected/87360161-2c16-453b-bfeb-649cd107fdf0-kube-api-access-tq8fv\") pod \"barbican-api-674779b598-44vdg\" (UID: \"87360161-2c16-453b-bfeb-649cd107fdf0\") " pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:52 crc kubenswrapper[4995]: I0120 16:50:52.840593 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:50:53 crc kubenswrapper[4995]: I0120 16:50:53.114506 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused"
Jan 20 16:50:53 crc kubenswrapper[4995]: I0120 16:50:53.308204 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7cd588cc5b-pmhlg" podUID="83a7df1c-c59a-4a4c-b34d-df9fc6711aea" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused"
Jan 20 16:50:55 crc kubenswrapper[4995]: I0120 16:50:55.473691 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"]
Jan 20 16:50:55 crc kubenswrapper[4995]: I0120 16:50:55.474209 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api-log" containerID="cri-o://287777f6ea29127ae0c036a791958f189d726ed2cb8f4828480e642fd9ecb222" gracePeriod=30
Jan 20 16:50:55 crc kubenswrapper[4995]: I0120 16:50:55.474258 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api" containerID="cri-o://4c7d9676d54da691d7994d6b939ffdcf7736aa9bebfc1880eddcfb27daac51ab" gracePeriod=30
Jan 20 16:50:56 crc kubenswrapper[4995]: I0120 16:50:56.248303 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-8dfc578d4-g6p6m"
Jan 20 16:50:56 crc kubenswrapper[4995]: I0120 16:50:56.510344 4995 generic.go:334] "Generic (PLEG): container finished" podID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerID="287777f6ea29127ae0c036a791958f189d726ed2cb8f4828480e642fd9ecb222" exitCode=143
Jan 20 16:50:56 crc kubenswrapper[4995]: I0120 16:50:56.510397 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e4fe40d8-90bd-4206-a304-91c67e90d8c0","Type":"ContainerDied","Data":"287777f6ea29127ae0c036a791958f189d726ed2cb8f4828480e642fd9ecb222"}
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.091183 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hmlm7"
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.262010 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-scripts\") pod \"53794c82-829c-4b77-b902-01be2130f0b8\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") "
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.262919 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53794c82-829c-4b77-b902-01be2130f0b8-etc-machine-id\") pod \"53794c82-829c-4b77-b902-01be2130f0b8\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") "
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.263025 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-db-sync-config-data\") pod \"53794c82-829c-4b77-b902-01be2130f0b8\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") "
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.263164 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-558j4\" (UniqueName: \"kubernetes.io/projected/53794c82-829c-4b77-b902-01be2130f0b8-kube-api-access-558j4\") pod \"53794c82-829c-4b77-b902-01be2130f0b8\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") "
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.263266 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-combined-ca-bundle\") pod \"53794c82-829c-4b77-b902-01be2130f0b8\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") "
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.263286 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/53794c82-829c-4b77-b902-01be2130f0b8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "53794c82-829c-4b77-b902-01be2130f0b8" (UID: "53794c82-829c-4b77-b902-01be2130f0b8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.263382 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-config-data\") pod \"53794c82-829c-4b77-b902-01be2130f0b8\" (UID: \"53794c82-829c-4b77-b902-01be2130f0b8\") "
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.268238 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "53794c82-829c-4b77-b902-01be2130f0b8" (UID: "53794c82-829c-4b77-b902-01be2130f0b8"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.270710 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53794c82-829c-4b77-b902-01be2130f0b8-kube-api-access-558j4" (OuterVolumeSpecName: "kube-api-access-558j4") pod "53794c82-829c-4b77-b902-01be2130f0b8" (UID: "53794c82-829c-4b77-b902-01be2130f0b8"). InnerVolumeSpecName "kube-api-access-558j4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.280262 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-scripts" (OuterVolumeSpecName: "scripts") pod "53794c82-829c-4b77-b902-01be2130f0b8" (UID: "53794c82-829c-4b77-b902-01be2130f0b8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.280463 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-558j4\" (UniqueName: \"kubernetes.io/projected/53794c82-829c-4b77-b902-01be2130f0b8-kube-api-access-558j4\") on node \"crc\" DevicePath \"\""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.280513 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.280528 4995 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/53794c82-829c-4b77-b902-01be2130f0b8-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.280542 4995 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.297970 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53794c82-829c-4b77-b902-01be2130f0b8" (UID: "53794c82-829c-4b77-b902-01be2130f0b8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.330654 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-config-data" (OuterVolumeSpecName: "config-data") pod "53794c82-829c-4b77-b902-01be2130f0b8" (UID: "53794c82-829c-4b77-b902-01be2130f0b8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.390142 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.390202 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53794c82-829c-4b77-b902-01be2130f0b8-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.523969 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hmlm7" event={"ID":"53794c82-829c-4b77-b902-01be2130f0b8","Type":"ContainerDied","Data":"4517099c2b1e4ce25f1f586f11fb0c280d019e739b6a8406792f9c94491d4697"}
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.524052 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4517099c2b1e4ce25f1f586f11fb0c280d019e739b6a8406792f9c94491d4697"
Jan 20 16:50:57 crc kubenswrapper[4995]: I0120 16:50:57.524122 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hmlm7"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.426734 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:50:58 crc kubenswrapper[4995]: E0120 16:50:58.427567 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53794c82-829c-4b77-b902-01be2130f0b8" containerName="cinder-db-sync"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.427583 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="53794c82-829c-4b77-b902-01be2130f0b8" containerName="cinder-db-sync"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.427843 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="53794c82-829c-4b77-b902-01be2130f0b8" containerName="cinder-db-sync"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.430247 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.440460 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sh7s5"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.440630 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.440985 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.441056 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.441263 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.484117 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-qm6ct"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.554162 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-4kqz9"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.565690 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.574284 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-4kqz9"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.582935 4995 generic.go:334] "Generic (PLEG): container finished" podID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerID="0d07b2ef5695d0b7aade74ffebd5e08dfe97b23fafbe31f5e44ce4e19adc4fa6" exitCode=137
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.582981 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6db4b5b7df-cv7h4" event={"ID":"701cf418-d6f5-4326-b237-2fd120de4bd3","Type":"ContainerDied","Data":"0d07b2ef5695d0b7aade74ffebd5e08dfe97b23fafbe31f5e44ce4e19adc4fa6"}
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.613106 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3000b30-37ee-4724-a13f-71f360b1af38-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.613156 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trmkf\" (UniqueName: \"kubernetes.io/projected/b3000b30-37ee-4724-a13f-71f360b1af38-kube-api-access-trmkf\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.613225 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.613257 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-scripts\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.613293 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.613319 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.624643 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7857b9874f-85h9n"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.651420 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.652965 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.655684 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.672230 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.717876 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.717964 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718046 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4a08e54b-e5a6-46a5-a634-54fb40f817ba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718063 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a08e54b-e5a6-46a5-a634-54fb40f817ba-logs\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718111 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718142 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718218 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-config\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718260 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp8rx\" (UniqueName: \"kubernetes.io/projected/ef8757f0-901f-4c5f-ac40-85d643918a47-kube-api-access-qp8rx\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718287 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-scripts\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718362 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data-custom\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718418 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718436 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skl8f\" (UniqueName: \"kubernetes.io/projected/4a08e54b-e5a6-46a5-a634-54fb40f817ba-kube-api-access-skl8f\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718500 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718536 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-scripts\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718649 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718680 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718751 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718821 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3000b30-37ee-4724-a13f-71f360b1af38-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.718843 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trmkf\" (UniqueName: \"kubernetes.io/projected/b3000b30-37ee-4724-a13f-71f360b1af38-kube-api-access-trmkf\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.721294 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3000b30-37ee-4724-a13f-71f360b1af38-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.727704 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-scripts\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.728764 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.744309 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8dfc578d4-g6p6m"]
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.744605 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8dfc578d4-g6p6m" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-api" containerID="cri-o://581d36a7796922e7f8417f9a42dd99de4ddd3acee35cb13a89958184095bd8e0" gracePeriod=30
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.745144 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8dfc578d4-g6p6m" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-httpd" containerID="cri-o://2379974f509cd0ea89130ddc261469f046e2000dbfe03c476aaae116fca92ae2" gracePeriod=30
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.753826 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.754187 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.756823 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trmkf\" (UniqueName: \"kubernetes.io/projected/b3000b30-37ee-4724-a13f-71f360b1af38-kube-api-access-trmkf\") pod \"cinder-scheduler-0\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.770094 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821354 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821430 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821474 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4a08e54b-e5a6-46a5-a634-54fb40f817ba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821498 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a08e54b-e5a6-46a5-a634-54fb40f817ba-logs\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821523 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821569 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-config\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821595 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp8rx\" (UniqueName: \"kubernetes.io/projected/ef8757f0-901f-4c5f-ac40-85d643918a47-kube-api-access-qp8rx\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821643 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data-custom\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821679 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skl8f\" (UniqueName: \"kubernetes.io/projected/4a08e54b-e5a6-46a5-a634-54fb40f817ba-kube-api-access-skl8f\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821729 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-scripts\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821765 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821795 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.821827 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.823004 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.823227 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.823313 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4a08e54b-e5a6-46a5-a634-54fb40f817ba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.823636 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a08e54b-e5a6-46a5-a634-54fb40f817ba-logs\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.824298 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.824904 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName:
\"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-config\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.826123 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.827477 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data-custom\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.828751 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.844801 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-scripts\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.847267 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.859801 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp8rx\" (UniqueName: \"kubernetes.io/projected/ef8757f0-901f-4c5f-ac40-85d643918a47-kube-api-access-qp8rx\") pod \"dnsmasq-dns-5c9776ccc5-4kqz9\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") " pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.864580 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skl8f\" (UniqueName: \"kubernetes.io/projected/4a08e54b-e5a6-46a5-a634-54fb40f817ba-kube-api-access-skl8f\") pod \"cinder-api-0\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") " pod="openstack/cinder-api-0" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.978865 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" Jan 20 16:50:58 crc kubenswrapper[4995]: I0120 16:50:58.993039 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.170621 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": dial tcp 10.217.0.173:9322: connect: connection refused" Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.170671 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.173:9322/\": dial tcp 10.217.0.173:9322: connect: connection refused" Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.598987 4995 generic.go:334] "Generic (PLEG): container finished" podID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerID="4c7d9676d54da691d7994d6b939ffdcf7736aa9bebfc1880eddcfb27daac51ab" exitCode=0 Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.599100 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e4fe40d8-90bd-4206-a304-91c67e90d8c0","Type":"ContainerDied","Data":"4c7d9676d54da691d7994d6b939ffdcf7736aa9bebfc1880eddcfb27daac51ab"} Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.603283 4995 generic.go:334] "Generic (PLEG): container finished" podID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerID="2379974f509cd0ea89130ddc261469f046e2000dbfe03c476aaae116fca92ae2" exitCode=0 Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.603314 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8dfc578d4-g6p6m" event={"ID":"8a05857e-09be-4a5d-8e6d-00ffcb0b2400","Type":"ContainerDied","Data":"2379974f509cd0ea89130ddc261469f046e2000dbfe03c476aaae116fca92ae2"} Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.613974 4995 generic.go:334] "Generic (PLEG): container finished" podID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerID="535ee8d3926b615b662db7b8b0bfb8429276fe61f3678c3c6a17376ba2509b31" exitCode=137 Jan 20 16:50:59 crc kubenswrapper[4995]: I0120 16:50:59.614018 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6db4b5b7df-cv7h4" event={"ID":"701cf418-d6f5-4326-b237-2fd120de4bd3","Type":"ContainerDied","Data":"535ee8d3926b615b662db7b8b0bfb8429276fe61f3678c3c6a17376ba2509b31"} Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.644613 4995 generic.go:334] "Generic (PLEG): container finished" podID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerID="513c03e1212539567d9fe49c930fb99027cdf27baa3e69d25f1cf4364f17d310" exitCode=137 Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.645312 4995 generic.go:334] "Generic (PLEG): container finished" podID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerID="5f7d1b1b1020d3572a3af35cb8858236820c5f35f7db8307b2befccacebdde4f" exitCode=137 Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.645429 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-588ff59d7c-f4fvk" event={"ID":"d3afe443-c0e4-49f2-9245-29db8eeefba5","Type":"ContainerDied","Data":"513c03e1212539567d9fe49c930fb99027cdf27baa3e69d25f1cf4364f17d310"} Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.645463 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-588ff59d7c-f4fvk" 
event={"ID":"d3afe443-c0e4-49f2-9245-29db8eeefba5","Type":"ContainerDied","Data":"5f7d1b1b1020d3572a3af35cb8858236820c5f35f7db8307b2befccacebdde4f"} Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.664012 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerName="dnsmasq-dns" containerID="cri-o://51972be83ed45c79182daef86f3655a4ff7cbbf055c6dee26b7249bc9b01d686" gracePeriod=10 Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.664187 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.666102 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 20 16:51:00 crc kubenswrapper[4995]: E0120 16:51:00.672024 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.706929 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" podStartSLOduration=12.706910701 podStartE2EDuration="12.706910701s" podCreationTimestamp="2026-01-20 16:50:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:00.699565112 +0000 UTC m=+1178.944169918" watchObservedRunningTime="2026-01-20 16:51:00.706910701 +0000 UTC m=+1178.951515507" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.768804 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.773485 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4fe40d8-90bd-4206-a304-91c67e90d8c0-logs\") pod \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.773533 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mnh7\" (UniqueName: \"kubernetes.io/projected/e4fe40d8-90bd-4206-a304-91c67e90d8c0-kube-api-access-5mnh7\") pod \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.773841 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-combined-ca-bundle\") pod \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.773935 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-config-data\") pod \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.774107 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: 
\"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-custom-prometheus-ca\") pod \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\" (UID: \"e4fe40d8-90bd-4206-a304-91c67e90d8c0\") " Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.777554 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e4fe40d8-90bd-4206-a304-91c67e90d8c0-logs" (OuterVolumeSpecName: "logs") pod "e4fe40d8-90bd-4206-a304-91c67e90d8c0" (UID: "e4fe40d8-90bd-4206-a304-91c67e90d8c0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.815959 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4fe40d8-90bd-4206-a304-91c67e90d8c0-kube-api-access-5mnh7" (OuterVolumeSpecName: "kube-api-access-5mnh7") pod "e4fe40d8-90bd-4206-a304-91c67e90d8c0" (UID: "e4fe40d8-90bd-4206-a304-91c67e90d8c0"). InnerVolumeSpecName "kube-api-access-5mnh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.877737 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4fe40d8-90bd-4206-a304-91c67e90d8c0-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.877983 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mnh7\" (UniqueName: \"kubernetes.io/projected/e4fe40d8-90bd-4206-a304-91c67e90d8c0-kube-api-access-5mnh7\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.886022 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "e4fe40d8-90bd-4206-a304-91c67e90d8c0" (UID: "e4fe40d8-90bd-4206-a304-91c67e90d8c0"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.887780 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e4fe40d8-90bd-4206-a304-91c67e90d8c0" (UID: "e4fe40d8-90bd-4206-a304-91c67e90d8c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.981113 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:00 crc kubenswrapper[4995]: I0120 16:51:00.981144 4995 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.219214 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-config-data" (OuterVolumeSpecName: "config-data") pod "e4fe40d8-90bd-4206-a304-91c67e90d8c0" (UID: "e4fe40d8-90bd-4206-a304-91c67e90d8c0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.258057 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.275190 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-674779b598-44vdg"] Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.287868 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4fe40d8-90bd-4206-a304-91c67e90d8c0-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.289229 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.301480 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.305792 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.380489 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-4kqz9"] Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398283 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/701cf418-d6f5-4326-b237-2fd120de4bd3-logs\") pod \"701cf418-d6f5-4326-b237-2fd120de4bd3\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398350 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-scripts\") pod \"701cf418-d6f5-4326-b237-2fd120de4bd3\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398370 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3afe443-c0e4-49f2-9245-29db8eeefba5-horizon-secret-key\") pod \"d3afe443-c0e4-49f2-9245-29db8eeefba5\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398391 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/701cf418-d6f5-4326-b237-2fd120de4bd3-horizon-secret-key\") pod \"701cf418-d6f5-4326-b237-2fd120de4bd3\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398460 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm2d9\" (UniqueName: \"kubernetes.io/projected/701cf418-d6f5-4326-b237-2fd120de4bd3-kube-api-access-wm2d9\") pod \"701cf418-d6f5-4326-b237-2fd120de4bd3\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398478 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg6jt\" (UniqueName: \"kubernetes.io/projected/d3afe443-c0e4-49f2-9245-29db8eeefba5-kube-api-access-lg6jt\") pod \"d3afe443-c0e4-49f2-9245-29db8eeefba5\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398499 4995 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data\") pod \"d3afe443-c0e4-49f2-9245-29db8eeefba5\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398657 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-config-data\") pod \"701cf418-d6f5-4326-b237-2fd120de4bd3\" (UID: \"701cf418-d6f5-4326-b237-2fd120de4bd3\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398739 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-scripts\") pod \"d3afe443-c0e4-49f2-9245-29db8eeefba5\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398772 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3afe443-c0e4-49f2-9245-29db8eeefba5-logs\") pod \"d3afe443-c0e4-49f2-9245-29db8eeefba5\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.398972 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/701cf418-d6f5-4326-b237-2fd120de4bd3-logs" (OuterVolumeSpecName: "logs") pod "701cf418-d6f5-4326-b237-2fd120de4bd3" (UID: "701cf418-d6f5-4326-b237-2fd120de4bd3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.399440 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/701cf418-d6f5-4326-b237-2fd120de4bd3-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.405091 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3afe443-c0e4-49f2-9245-29db8eeefba5-kube-api-access-lg6jt" (OuterVolumeSpecName: "kube-api-access-lg6jt") pod "d3afe443-c0e4-49f2-9245-29db8eeefba5" (UID: "d3afe443-c0e4-49f2-9245-29db8eeefba5"). InnerVolumeSpecName "kube-api-access-lg6jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.410769 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3afe443-c0e4-49f2-9245-29db8eeefba5-logs" (OuterVolumeSpecName: "logs") pod "d3afe443-c0e4-49f2-9245-29db8eeefba5" (UID: "d3afe443-c0e4-49f2-9245-29db8eeefba5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.412770 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3afe443-c0e4-49f2-9245-29db8eeefba5-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "d3afe443-c0e4-49f2-9245-29db8eeefba5" (UID: "d3afe443-c0e4-49f2-9245-29db8eeefba5"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.418283 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/701cf418-d6f5-4326-b237-2fd120de4bd3-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "701cf418-d6f5-4326-b237-2fd120de4bd3" (UID: "701cf418-d6f5-4326-b237-2fd120de4bd3"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.425388 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/701cf418-d6f5-4326-b237-2fd120de4bd3-kube-api-access-wm2d9" (OuterVolumeSpecName: "kube-api-access-wm2d9") pod "701cf418-d6f5-4326-b237-2fd120de4bd3" (UID: "701cf418-d6f5-4326-b237-2fd120de4bd3"). InnerVolumeSpecName "kube-api-access-wm2d9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: E0120 16:51:01.474377 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data podName:d3afe443-c0e4-49f2-9245-29db8eeefba5 nodeName:}" failed. No retries permitted until 2026-01-20 16:51:01.974344847 +0000 UTC m=+1180.218949653 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data") pod "d3afe443-c0e4-49f2-9245-29db8eeefba5" (UID: "d3afe443-c0e4-49f2-9245-29db8eeefba5") : error deleting /var/lib/kubelet/pods/d3afe443-c0e4-49f2-9245-29db8eeefba5/volume-subpaths: remove /var/lib/kubelet/pods/d3afe443-c0e4-49f2-9245-29db8eeefba5/volume-subpaths: no such file or directory Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.474814 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-scripts" (OuterVolumeSpecName: "scripts") pod "d3afe443-c0e4-49f2-9245-29db8eeefba5" (UID: "d3afe443-c0e4-49f2-9245-29db8eeefba5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.480305 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-config-data" (OuterVolumeSpecName: "config-data") pod "701cf418-d6f5-4326-b237-2fd120de4bd3" (UID: "701cf418-d6f5-4326-b237-2fd120de4bd3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.486450 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-scripts" (OuterVolumeSpecName: "scripts") pod "701cf418-d6f5-4326-b237-2fd120de4bd3" (UID: "701cf418-d6f5-4326-b237-2fd120de4bd3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512148 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512177 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3afe443-c0e4-49f2-9245-29db8eeefba5-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512187 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512197 4995 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d3afe443-c0e4-49f2-9245-29db8eeefba5-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512205 4995 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/701cf418-d6f5-4326-b237-2fd120de4bd3-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512213 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm2d9\" (UniqueName: \"kubernetes.io/projected/701cf418-d6f5-4326-b237-2fd120de4bd3-kube-api-access-wm2d9\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512223 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg6jt\" (UniqueName: \"kubernetes.io/projected/d3afe443-c0e4-49f2-9245-29db8eeefba5-kube-api-access-lg6jt\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.512231 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/701cf418-d6f5-4326-b237-2fd120de4bd3-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.690057 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerStarted","Data":"a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.690527 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="ceilometer-notification-agent" containerID="cri-o://864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9" gracePeriod=30 Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.690931 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.691280 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="proxy-httpd" containerID="cri-o://a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866" gracePeriod=30 Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.691352 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="sg-core" containerID="cri-o://6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6" gracePeriod=30 Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.695706 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" event={"ID":"ef8757f0-901f-4c5f-ac40-85d643918a47","Type":"ContainerStarted","Data":"1b094aced33825a1b27302a7abf62f195ec594f6cabd634fcded6ac71708b718"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.701470 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" event={"ID":"82750a54-2446-49e7-8251-7ae6f228dc49","Type":"ContainerStarted","Data":"95b7cded1f579a31a1b09df8d5c2302cc5e1a0dafbc109521a1a8c0db71ccd0e"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.701513 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" event={"ID":"82750a54-2446-49e7-8251-7ae6f228dc49","Type":"ContainerStarted","Data":"abceec566739eec5cc04f19c4113b17c9a5e9db15758bc7313654cf7ccd1cc32"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.722428 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b68c6cc67-mvcbt" event={"ID":"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5","Type":"ContainerStarted","Data":"3fc79d2b0e3b9723f65dc93bcfed8ddad4f1676af74182394ad9edc0be531d44"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.722470 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b68c6cc67-mvcbt" event={"ID":"2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5","Type":"ContainerStarted","Data":"c2487a8620e21502f6490a9b5f4c7bfe327d7f1116434b0cb3a4f1f2c45c24cf"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.735605 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3000b30-37ee-4724-a13f-71f360b1af38","Type":"ContainerStarted","Data":"73fd909ce02ef188ae3edb80d7f79d89b8372dee80b01abb728f5e92c5a15e36"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.756115 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-76684d5cb6-ln2nh" podStartSLOduration=3.451422509 podStartE2EDuration="13.756092551s" podCreationTimestamp="2026-01-20 16:50:48 +0000 UTC" firstStartedPulling="2026-01-20 16:50:49.890673937 +0000 UTC m=+1168.135278743" lastFinishedPulling="2026-01-20 16:51:00.195343989 +0000 UTC m=+1178.439948785" observedRunningTime="2026-01-20 16:51:01.747457137 +0000 UTC m=+1179.992061943" watchObservedRunningTime="2026-01-20 16:51:01.756092551 +0000 UTC m=+1180.000697357" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.766805 4995 generic.go:334] "Generic (PLEG): container finished" podID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerID="581d36a7796922e7f8417f9a42dd99de4ddd3acee35cb13a89958184095bd8e0" exitCode=0 Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.766868 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8dfc578d4-g6p6m" event={"ID":"8a05857e-09be-4a5d-8e6d-00ffcb0b2400","Type":"ContainerDied","Data":"581d36a7796922e7f8417f9a42dd99de4ddd3acee35cb13a89958184095bd8e0"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.793232 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-b68c6cc67-mvcbt" podStartSLOduration=3.474781193 podStartE2EDuration="13.793217567s" 
podCreationTimestamp="2026-01-20 16:50:48 +0000 UTC" firstStartedPulling="2026-01-20 16:50:49.770796199 +0000 UTC m=+1168.015400995" lastFinishedPulling="2026-01-20 16:51:00.089232563 +0000 UTC m=+1178.333837369" observedRunningTime="2026-01-20 16:51:01.788771757 +0000 UTC m=+1180.033376563" watchObservedRunningTime="2026-01-20 16:51:01.793217567 +0000 UTC m=+1180.037822363" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.806517 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-588ff59d7c-f4fvk" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.811464 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-588ff59d7c-f4fvk" event={"ID":"d3afe443-c0e4-49f2-9245-29db8eeefba5","Type":"ContainerDied","Data":"f1f1d961013141971b78badc753cf28255d0ea76ad41f05356f6b162c3588969"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.811544 4995 scope.go:117] "RemoveContainer" containerID="513c03e1212539567d9fe49c930fb99027cdf27baa3e69d25f1cf4364f17d310" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.832326 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4a08e54b-e5a6-46a5-a634-54fb40f817ba","Type":"ContainerStarted","Data":"857e92a1ac64d12a38e69f996f1630e842ec90b00bb5cde96e449cf6bfb861ad"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.838160 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674779b598-44vdg" event={"ID":"87360161-2c16-453b-bfeb-649cd107fdf0","Type":"ContainerStarted","Data":"7e9e80fb5624d35b6afd57655ec7f016747e5901efba9ccc1c6f597bdda3ca9d"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.843462 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e4fe40d8-90bd-4206-a304-91c67e90d8c0","Type":"ContainerDied","Data":"a5883c5d7b4ea9192cb096679baa86e3276cf1f13aa725092778fa431c1f7269"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.843548 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.851708 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" event={"ID":"f8c780d8-786b-4f35-a3ff-22b2e9081e1d","Type":"ContainerStarted","Data":"51972be83ed45c79182daef86f3655a4ff7cbbf055c6dee26b7249bc9b01d686"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.864925 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6db4b5b7df-cv7h4" event={"ID":"701cf418-d6f5-4326-b237-2fd120de4bd3","Type":"ContainerDied","Data":"c9f3e1f5fd4ca075bbc145faefb918f6a017aba7db6bc9014583e64d1efe4cff"} Jan 20 16:51:01 crc kubenswrapper[4995]: I0120 16:51:01.865023 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.038753 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data\") pod \"d3afe443-c0e4-49f2-9245-29db8eeefba5\" (UID: \"d3afe443-c0e4-49f2-9245-29db8eeefba5\") " Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.041779 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data" (OuterVolumeSpecName: "config-data") pod "d3afe443-c0e4-49f2-9245-29db8eeefba5" (UID: "d3afe443-c0e4-49f2-9245-29db8eeefba5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.122938 4995 scope.go:117] "RemoveContainer" containerID="5f7d1b1b1020d3572a3af35cb8858236820c5f35f7db8307b2befccacebdde4f" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.141117 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d3afe443-c0e4-49f2-9245-29db8eeefba5-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.394542 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77d6d89bf8-nkl4h" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.483983 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.484480 4995 scope.go:117] "RemoveContainer" containerID="4c7d9676d54da691d7994d6b939ffdcf7736aa9bebfc1880eddcfb27daac51ab" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.496185 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8dfc578d4-g6p6m" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.497705 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511027 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511454 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-api" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511466 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-api" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511501 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon-log" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511509 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon-log" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511529 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511536 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511550 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon-log" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511576 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon-log" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511595 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511601 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511612 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-httpd" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511618 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-httpd" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511628 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api-log" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511653 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api-log" Jan 20 16:51:02 crc kubenswrapper[4995]: E0120 16:51:02.511663 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511669 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511904 4995 
memory_manager.go:354] "RemoveStaleState removing state" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon-log" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511917 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon-log" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511929 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-api" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511945 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" containerName="neutron-httpd" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511972 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" containerName="horizon" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511980 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" containerName="horizon" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511989 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.511997 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" containerName="watcher-api-log" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.519854 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.520824 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.522621 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.522807 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.524930 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661101 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j87d7\" (UniqueName: \"kubernetes.io/projected/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-kube-api-access-j87d7\") pod \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661196 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-combined-ca-bundle\") pod \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661235 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-ovndb-tls-certs\") pod \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661344 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-httpd-config\") pod \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661381 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-config\") pod \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\" (UID: \"8a05857e-09be-4a5d-8e6d-00ffcb0b2400\") " Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661645 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e797cd4f-fdf8-485b-94e6-2a1105dedb71-logs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661702 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-public-tls-certs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661737 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661807 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7z6j\" (UniqueName: \"kubernetes.io/projected/e797cd4f-fdf8-485b-94e6-2a1105dedb71-kube-api-access-g7z6j\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661856 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661876 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-config-data\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.661922 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0" Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.669711 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "8a05857e-09be-4a5d-8e6d-00ffcb0b2400" (UID: "8a05857e-09be-4a5d-8e6d-00ffcb0b2400"). 
InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.693745 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-kube-api-access-j87d7" (OuterVolumeSpecName: "kube-api-access-j87d7") pod "8a05857e-09be-4a5d-8e6d-00ffcb0b2400" (UID: "8a05857e-09be-4a5d-8e6d-00ffcb0b2400"). InnerVolumeSpecName "kube-api-access-j87d7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.727834 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-config" (OuterVolumeSpecName: "config") pod "8a05857e-09be-4a5d-8e6d-00ffcb0b2400" (UID: "8a05857e-09be-4a5d-8e6d-00ffcb0b2400"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.735350 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a05857e-09be-4a5d-8e6d-00ffcb0b2400" (UID: "8a05857e-09be-4a5d-8e6d-00ffcb0b2400"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763183 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7z6j\" (UniqueName: \"kubernetes.io/projected/e797cd4f-fdf8-485b-94e6-2a1105dedb71-kube-api-access-g7z6j\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763247 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-config-data\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763267 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763311 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763351 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e797cd4f-fdf8-485b-94e6-2a1105dedb71-logs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763386 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-public-tls-certs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763411 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763512 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763523 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j87d7\" (UniqueName: \"kubernetes.io/projected/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-kube-api-access-j87d7\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763533 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.763541 4995 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-httpd-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.764439 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e797cd4f-fdf8-485b-94e6-2a1105dedb71-logs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.768467 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.771692 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-config-data\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.772189 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "8a05857e-09be-4a5d-8e6d-00ffcb0b2400" (UID: "8a05857e-09be-4a5d-8e6d-00ffcb0b2400"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.772366 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-public-tls-certs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.772705 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.774908 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.780795 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e797cd4f-fdf8-485b-94e6-2a1105dedb71-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.781293 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7z6j\" (UniqueName: \"kubernetes.io/projected/e797cd4f-fdf8-485b-94e6-2a1105dedb71-kube-api-access-g7z6j\") pod \"watcher-api-0\" (UID: \"e797cd4f-fdf8-485b-94e6-2a1105dedb71\") " pod="openstack/watcher-api-0"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.864903 4995 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8a05857e-09be-4a5d-8e6d-00ffcb0b2400-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.891640 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8dfc578d4-g6p6m" event={"ID":"8a05857e-09be-4a5d-8e6d-00ffcb0b2400","Type":"ContainerDied","Data":"d98b30eb6a6bd1d782d580b8551a61aac704eedf14bad87f9d819feb5c166c48"}
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.891698 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8dfc578d4-g6p6m"
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.894801 4995 generic.go:334] "Generic (PLEG): container finished" podID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerID="a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866" exitCode=0
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.894831 4995 generic.go:334] "Generic (PLEG): container finished" podID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerID="6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6" exitCode=2
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.894898 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerDied","Data":"a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866"}
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.894935 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerDied","Data":"6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6"}
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.900910 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674779b598-44vdg" event={"ID":"87360161-2c16-453b-bfeb-649cd107fdf0","Type":"ContainerStarted","Data":"536e51fbc90069d6563253560e9c935f83429f694dc4658bdb6c10152539107b"}
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.902695 4995 generic.go:334] "Generic (PLEG): container finished" podID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerID="76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06" exitCode=0
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.903130 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" event={"ID":"ef8757f0-901f-4c5f-ac40-85d643918a47","Type":"ContainerDied","Data":"76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06"}
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.907164 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-588ff59d7c-f4fvk"]
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.915188 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-588ff59d7c-f4fvk"]
Jan 20 16:51:02 crc kubenswrapper[4995]: I0120 16:51:02.921359 4995 scope.go:117] "RemoveContainer" containerID="287777f6ea29127ae0c036a791958f189d726ed2cb8f4828480e642fd9ecb222"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.026191 4995 scope.go:117] "RemoveContainer" containerID="535ee8d3926b615b662db7b8b0bfb8429276fe61f3678c3c6a17376ba2509b31"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.036410 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.060244 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8dfc578d4-g6p6m"]
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.073792 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8dfc578d4-g6p6m"]
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.250360 4995 scope.go:117] "RemoveContainer" containerID="0d07b2ef5695d0b7aade74ffebd5e08dfe97b23fafbe31f5e44ce4e19adc4fa6"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.275361 4995 scope.go:117] "RemoveContainer" containerID="2379974f509cd0ea89130ddc261469f046e2000dbfe03c476aaae116fca92ae2"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.429016 4995 scope.go:117] "RemoveContainer" containerID="581d36a7796922e7f8417f9a42dd99de4ddd3acee35cb13a89958184095bd8e0"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.831547 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Jan 20 16:51:03 crc kubenswrapper[4995]: W0120 16:51:03.836334 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode797cd4f_fdf8_485b_94e6_2a1105dedb71.slice/crio-24724fd668856c908275239627df4fa2addf59bdc543a7dd8e22acf161e43b9d WatchSource:0}: Error finding container 24724fd668856c908275239627df4fa2addf59bdc543a7dd8e22acf161e43b9d: Status 404 returned error can't find the container with id 24724fd668856c908275239627df4fa2addf59bdc543a7dd8e22acf161e43b9d
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.944831 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4a08e54b-e5a6-46a5-a634-54fb40f817ba","Type":"ContainerStarted","Data":"ab83c525e93061bc3b09f2e3f93e8281a5bd4771395e23b0826449cb4956e0d2"}
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.947651 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-674779b598-44vdg" event={"ID":"87360161-2c16-453b-bfeb-649cd107fdf0","Type":"ContainerStarted","Data":"3756f826d195daa511c55fd4e19987632a28869cfdfd8b1d317626abe2426b55"}
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.948174 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.948308 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-674779b598-44vdg"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.959667 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" event={"ID":"ef8757f0-901f-4c5f-ac40-85d643918a47","Type":"ContainerStarted","Data":"b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6"}
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.960472 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.969187 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e797cd4f-fdf8-485b-94e6-2a1105dedb71","Type":"ContainerStarted","Data":"24724fd668856c908275239627df4fa2addf59bdc543a7dd8e22acf161e43b9d"}
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.977692 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-674779b598-44vdg" podStartSLOduration=11.977672111 podStartE2EDuration="11.977672111s" podCreationTimestamp="2026-01-20 16:50:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:03.973342984 +0000 UTC m=+1182.217947800" watchObservedRunningTime="2026-01-20 16:51:03.977672111 +0000 UTC m=+1182.222276917"
Jan 20 16:51:03 crc kubenswrapper[4995]: I0120 16:51:03.998923 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" podStartSLOduration=5.998905866 podStartE2EDuration="5.998905866s" podCreationTimestamp="2026-01-20 16:50:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:03.991532566 +0000 UTC m=+1182.236137372" watchObservedRunningTime="2026-01-20 16:51:03.998905866 +0000 UTC m=+1182.243510672"
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.012174 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a05857e-09be-4a5d-8e6d-00ffcb0b2400" path="/var/lib/kubelet/pods/8a05857e-09be-4a5d-8e6d-00ffcb0b2400/volumes"
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.013161 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3afe443-c0e4-49f2-9245-29db8eeefba5" path="/var/lib/kubelet/pods/d3afe443-c0e4-49f2-9245-29db8eeefba5/volumes"
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.013983 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4fe40d8-90bd-4206-a304-91c67e90d8c0" path="/var/lib/kubelet/pods/e4fe40d8-90bd-4206-a304-91c67e90d8c0/volumes"
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.867737 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917617 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-log-httpd\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917762 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-sg-core-conf-yaml\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917805 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jr2b\" (UniqueName: \"kubernetes.io/projected/1e646811-19e9-4a68-a419-6d0db9feb93e-kube-api-access-6jr2b\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917843 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-config-data\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917889 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-combined-ca-bundle\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917941 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-run-httpd\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.917966 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-scripts\") pod \"1e646811-19e9-4a68-a419-6d0db9feb93e\" (UID: \"1e646811-19e9-4a68-a419-6d0db9feb93e\") "
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.918340 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.918841 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.923523 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-scripts" (OuterVolumeSpecName: "scripts") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.933298 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e646811-19e9-4a68-a419-6d0db9feb93e-kube-api-access-6jr2b" (OuterVolumeSpecName: "kube-api-access-6jr2b") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "kube-api-access-6jr2b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.945205 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.985449 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.995360 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-config-data" (OuterVolumeSpecName: "config-data") pod "1e646811-19e9-4a68-a419-6d0db9feb93e" (UID: "1e646811-19e9-4a68-a419-6d0db9feb93e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.999838 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e797cd4f-fdf8-485b-94e6-2a1105dedb71","Type":"ContainerStarted","Data":"cc886a9ac1219c1a0bc673d54c395e040f99791ca9f0cfdf0869454b39a96cf9"}
Jan 20 16:51:04 crc kubenswrapper[4995]: I0120 16:51:04.999889 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"e797cd4f-fdf8-485b-94e6-2a1105dedb71","Type":"ContainerStarted","Data":"96170fca21ed49ec95fd73e9a887321a4cf2acc7069fbc33f73ea25ecf96837b"}
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.000914 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.008681 4995 generic.go:334] "Generic (PLEG): container finished" podID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerID="864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9" exitCode=0
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.008812 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.009237 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerDied","Data":"864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9"}
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.009290 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1e646811-19e9-4a68-a419-6d0db9feb93e","Type":"ContainerDied","Data":"1933e45f4ec3de87b94ea5b28f797c45168e555ccd6ff0d96212cd11ac314fe0"}
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.009309 4995 scope.go:117] "RemoveContainer" containerID="a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.020395 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4a08e54b-e5a6-46a5-a634-54fb40f817ba","Type":"ContainerStarted","Data":"6896b7c96b50c10f03dca752bbf3bf62a6b8fd3b9be112bbcdd778fd37f6fcd3"}
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.020703 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api-log" containerID="cri-o://ab83c525e93061bc3b09f2e3f93e8281a5bd4771395e23b0826449cb4956e0d2" gracePeriod=30
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.021144 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api" containerID="cri-o://6896b7c96b50c10f03dca752bbf3bf62a6b8fd3b9be112bbcdd778fd37f6fcd3" gracePeriod=30
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.021175 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022553 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022581 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022590 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1e646811-19e9-4a68-a419-6d0db9feb93e-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022600 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022611 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jr2b\" (UniqueName: \"kubernetes.io/projected/1e646811-19e9-4a68-a419-6d0db9feb93e-kube-api-access-6jr2b\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022621 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022629 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e646811-19e9-4a68-a419-6d0db9feb93e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.022976 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=3.022962345 podStartE2EDuration="3.022962345s" podCreationTimestamp="2026-01-20 16:51:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:05.022405571 +0000 UTC m=+1183.267010377" watchObservedRunningTime="2026-01-20 16:51:05.022962345 +0000 UTC m=+1183.267567151"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.041546 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3000b30-37ee-4724-a13f-71f360b1af38","Type":"ContainerStarted","Data":"3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3"}
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.041593 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3000b30-37ee-4724-a13f-71f360b1af38","Type":"ContainerStarted","Data":"acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d"}
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.072915 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.072890759 podStartE2EDuration="7.072890759s" podCreationTimestamp="2026-01-20 16:50:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:05.070834793 +0000 UTC m=+1183.315439609" watchObservedRunningTime="2026-01-20 16:51:05.072890759 +0000 UTC m=+1183.317495575"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.081308 4995 scope.go:117] "RemoveContainer" containerID="6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.103458 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.503553522 podStartE2EDuration="7.103439496s" podCreationTimestamp="2026-01-20 16:50:58 +0000 UTC" firstStartedPulling="2026-01-20 16:51:01.364799448 +0000 UTC m=+1179.609404254" lastFinishedPulling="2026-01-20 16:51:02.964685422 +0000 UTC m=+1181.209290228" observedRunningTime="2026-01-20 16:51:05.092597423 +0000 UTC m=+1183.337202239" watchObservedRunningTime="2026-01-20 16:51:05.103439496 +0000 UTC m=+1183.348044302"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.104956 4995 scope.go:117] "RemoveContainer" containerID="864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.163479 4995 scope.go:117] "RemoveContainer" containerID="a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866"
Jan 20 16:51:05 crc kubenswrapper[4995]: E0120 16:51:05.165266 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866\": container with ID starting with a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866 not found: ID does not exist" containerID="a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.165295 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866"} err="failed to get container status \"a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866\": rpc error: code = NotFound desc = could not find container \"a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866\": container with ID starting with a7cf10c250f8a8ee61b576926b2d86fc8dec5b569465d4d02b572a9daa538866 not found: ID does not exist"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.165315 4995 scope.go:117] "RemoveContainer" containerID="6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6"
Jan 20 16:51:05 crc kubenswrapper[4995]: E0120 16:51:05.167185 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6\": container with ID starting with 6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6 not found: ID does not exist" containerID="6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.167208 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6"} err="failed to get container status \"6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6\": rpc error: code = NotFound desc = could not find container \"6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6\": container with ID starting with 6664ea6c464a0a5eda6bef8dba641bedb65fc490c9455469dafabbbe37f99ca6 not found: ID does not exist"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.167222 4995 scope.go:117] "RemoveContainer" containerID="864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9"
Jan 20 16:51:05 crc kubenswrapper[4995]: E0120 16:51:05.167598 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9\": container with ID starting with 864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9 not found: ID does not exist" containerID="864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.167613 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9"} err="failed to get container status \"864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9\": rpc error: code = NotFound desc = could not find container \"864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9\": container with ID starting with 864990dd96ca1414438bf7670c316f1de6c8c741e856858a57357e904b359cf9 not found: ID does not exist"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.182136 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.195261 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.209607 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:05 crc kubenswrapper[4995]: E0120 16:51:05.209971 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="ceilometer-notification-agent"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.209982 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="ceilometer-notification-agent"
Jan 20 16:51:05 crc kubenswrapper[4995]: E0120 16:51:05.209994 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="sg-core"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.210000 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="sg-core"
Jan 20 16:51:05 crc kubenswrapper[4995]: E0120 16:51:05.210021 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="proxy-httpd"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.210026 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="proxy-httpd"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.210252 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="proxy-httpd"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.210269 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="ceilometer-notification-agent"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.210285 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" containerName="sg-core"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.212250 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.216684 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.216904 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227370 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tldmx\" (UniqueName: \"kubernetes.io/projected/1f72d75c-837a-45db-9879-b2fcda5ff029-kube-api-access-tldmx\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227488 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-run-httpd\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227533 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-config-data\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227576 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-scripts\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227735 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-log-httpd\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227783 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.227804 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.232411 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329017 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-run-httpd\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329066 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-config-data\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329114 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-scripts\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329175 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-log-httpd\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329203 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329220 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329272 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tldmx\" (UniqueName: \"kubernetes.io/projected/1f72d75c-837a-45db-9879-b2fcda5ff029-kube-api-access-tldmx\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329673 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-run-httpd\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.329791 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-log-httpd\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.339018 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-scripts\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.339174 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.343554 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.351971 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-config-data\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.353812 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tldmx\" (UniqueName: \"kubernetes.io/projected/1f72d75c-837a-45db-9879-b2fcda5ff029-kube-api-access-tldmx\") pod \"ceilometer-0\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " pod="openstack/ceilometer-0"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.494881 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7cd588cc5b-pmhlg"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.523136 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-84df7dbffb-njbnq"
Jan 20 16:51:05 crc kubenswrapper[4995]: I0120 16:51:05.537358 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.017352 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e646811-19e9-4a68-a419-6d0db9feb93e" path="/var/lib/kubelet/pods/1e646811-19e9-4a68-a419-6d0db9feb93e/volumes"
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.029171 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.086987 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerStarted","Data":"0d5331e02d7462fd81beb9148cd57105a89be33b43352e6313bbaeb9496c242e"}
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.100049 4995 generic.go:334] "Generic (PLEG): container finished" podID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerID="6896b7c96b50c10f03dca752bbf3bf62a6b8fd3b9be112bbcdd778fd37f6fcd3" exitCode=0
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.100490 4995 generic.go:334] "Generic (PLEG): container finished" podID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerID="ab83c525e93061bc3b09f2e3f93e8281a5bd4771395e23b0826449cb4956e0d2" exitCode=143
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.100967 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4a08e54b-e5a6-46a5-a634-54fb40f817ba","Type":"ContainerDied","Data":"6896b7c96b50c10f03dca752bbf3bf62a6b8fd3b9be112bbcdd778fd37f6fcd3"}
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.101044 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4a08e54b-e5a6-46a5-a634-54fb40f817ba","Type":"ContainerDied","Data":"ab83c525e93061bc3b09f2e3f93e8281a5bd4771395e23b0826449cb4956e0d2"}
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.493227 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.595844 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-scripts\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.595886 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skl8f\" (UniqueName: \"kubernetes.io/projected/4a08e54b-e5a6-46a5-a634-54fb40f817ba-kube-api-access-skl8f\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.595952 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4a08e54b-e5a6-46a5-a634-54fb40f817ba-etc-machine-id\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.595985 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data-custom\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.596034 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.596181 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-combined-ca-bundle\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.596206 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a08e54b-e5a6-46a5-a634-54fb40f817ba-logs\") pod \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\" (UID: \"4a08e54b-e5a6-46a5-a634-54fb40f817ba\") "
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.596156 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4a08e54b-e5a6-46a5-a634-54fb40f817ba-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.596730 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a08e54b-e5a6-46a5-a634-54fb40f817ba-logs" (OuterVolumeSpecName: "logs") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.601422 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-scripts" (OuterVolumeSpecName: "scripts") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.604115 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.614314 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a08e54b-e5a6-46a5-a634-54fb40f817ba-kube-api-access-skl8f" (OuterVolumeSpecName: "kube-api-access-skl8f") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "kube-api-access-skl8f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.629674 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.656849 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data" (OuterVolumeSpecName: "config-data") pod "4a08e54b-e5a6-46a5-a634-54fb40f817ba" (UID: "4a08e54b-e5a6-46a5-a634-54fb40f817ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.698966 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.698989 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a08e54b-e5a6-46a5-a634-54fb40f817ba-logs\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.698999 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.699006 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skl8f\" (UniqueName: \"kubernetes.io/projected/4a08e54b-e5a6-46a5-a634-54fb40f817ba-kube-api-access-skl8f\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.699016 4995 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4a08e54b-e5a6-46a5-a634-54fb40f817ba-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.699024 4995 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:06 crc kubenswrapper[4995]: I0120 16:51:06.699032 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a08e54b-e5a6-46a5-a634-54fb40f817ba-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.125809 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.126566 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.128624 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"4a08e54b-e5a6-46a5-a634-54fb40f817ba","Type":"ContainerDied","Data":"857e92a1ac64d12a38e69f996f1630e842ec90b00bb5cde96e449cf6bfb861ad"}
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.128686 4995 scope.go:117] "RemoveContainer" containerID="6896b7c96b50c10f03dca752bbf3bf62a6b8fd3b9be112bbcdd778fd37f6fcd3"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.160704 4995 scope.go:117] "RemoveContainer" containerID="ab83c525e93061bc3b09f2e3f93e8281a5bd4771395e23b0826449cb4956e0d2"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.172644 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.189470 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.203176 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:51:07 crc kubenswrapper[4995]: E0120 16:51:07.203537 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api-log"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.203553 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api-log"
Jan 20 16:51:07 crc kubenswrapper[4995]: E0120 16:51:07.203576 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.203582 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.203780 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api-log"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.203802 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" containerName="cinder-api"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.204708 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.207701 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.207928 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.208109 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213443 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59abbdb2-429a-473e-ae6b-8f731b6cf17d-logs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213495 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-config-data\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213573 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-config-data-custom\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213632 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z87c\" (UniqueName: \"kubernetes.io/projected/59abbdb2-429a-473e-ae6b-8f731b6cf17d-kube-api-access-8z87c\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213665 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/59abbdb2-429a-473e-ae6b-8f731b6cf17d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213689 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-scripts\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213746 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213781 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.213893 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.214682 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.256035 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7cd588cc5b-pmhlg"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315346 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315429 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59abbdb2-429a-473e-ae6b-8f731b6cf17d-logs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315454 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-config-data\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315739 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-config-data-custom\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315812 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z87c\" (UniqueName: \"kubernetes.io/projected/59abbdb2-429a-473e-ae6b-8f731b6cf17d-kube-api-access-8z87c\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315856 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/59abbdb2-429a-473e-ae6b-8f731b6cf17d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315879 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-scripts\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.315946 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.316009 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.321553 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/59abbdb2-429a-473e-ae6b-8f731b6cf17d-etc-machine-id\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.322024 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59abbdb2-429a-473e-ae6b-8f731b6cf17d-logs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.325526 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-public-tls-certs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.325691 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-config-data-custom\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.326564 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.326739 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-scripts\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.328849 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.336544 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84df7dbffb-njbnq"]
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.336845 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon-log" containerID="cri-o://d5e83fb3d6635ff7c522d7f7fcf25ac6e8c713eb35fed7645bab1f6b93ff4b23" gracePeriod=30
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.337166 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" containerID="cri-o://b8e948adfe77d2cd9ea089ac1055ca05cf957452ed332421df4521eb39eda287" gracePeriod=30
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.340374 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z87c\" (UniqueName: \"kubernetes.io/projected/59abbdb2-429a-473e-ae6b-8f731b6cf17d-kube-api-access-8z87c\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.345543 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": EOF"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.349932 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59abbdb2-429a-473e-ae6b-8f731b6cf17d-config-data\") pod \"cinder-api-0\" (UID: \"59abbdb2-429a-473e-ae6b-8f731b6cf17d\") " pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.559860 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 20 16:51:07 crc kubenswrapper[4995]: I0120 16:51:07.789045 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.002797 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a08e54b-e5a6-46a5-a634-54fb40f817ba" path="/var/lib/kubelet/pods/4a08e54b-e5a6-46a5-a634-54fb40f817ba/volumes"
Jan 20 16:51:08 crc kubenswrapper[4995]: W0120 16:51:08.022964 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59abbdb2_429a_473e_ae6b_8f731b6cf17d.slice/crio-73023be8670788c431f68a945046c1df7b8f1b86b02a42ec65d9f38754ba0959 WatchSource:0}: Error finding container 73023be8670788c431f68a945046c1df7b8f1b86b02a42ec65d9f38754ba0959: Status 404 returned error can't find the container with id 73023be8670788c431f68a945046c1df7b8f1b86b02a42ec65d9f38754ba0959
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.025112 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.037216 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.149277 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"59abbdb2-429a-473e-ae6b-8f731b6cf17d","Type":"ContainerStarted","Data":"73023be8670788c431f68a945046c1df7b8f1b86b02a42ec65d9f38754ba0959"}
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.156113 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerStarted","Data":"c0ac39e33abbf83c5bba650b4b43570ca2c39a8140935e0a296726ed2caf4666"}
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.770711 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 20 16:51:08 crc kubenswrapper[4995]: I0120 16:51:08.981393 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:51:09 crc
kubenswrapper[4995]: I0120 16:51:09.049733 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-9jjtp"] Jan 20 16:51:09 crc kubenswrapper[4995]: I0120 16:51:09.052581 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerName="dnsmasq-dns" containerID="cri-o://b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a" gracePeriod=10 Jan 20 16:51:09 crc kubenswrapper[4995]: I0120 16:51:09.168505 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"59abbdb2-429a-473e-ae6b-8f731b6cf17d","Type":"ContainerStarted","Data":"5e06c6ebe3411235e14a53ac9c064037e2dc99fc6b13f8e41ecab31b287de5c9"} Jan 20 16:51:09 crc kubenswrapper[4995]: I0120 16:51:09.172624 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerStarted","Data":"44e4ae161bb59b4e914f8e50005b6876716285b0fa11f2c3e7bb0c4c54f4dbe5"} Jan 20 16:51:09 crc kubenswrapper[4995]: I0120 16:51:09.394272 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" Jan 20 16:51:09 crc kubenswrapper[4995]: I0120 16:51:09.954025 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-674779b598-44vdg" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.013051 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.182338 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"59abbdb2-429a-473e-ae6b-8f731b6cf17d","Type":"ContainerStarted","Data":"7c6707758946250d4613f1b01ad4a62ef609057110e66b51e684cdabf62f4c7a"} Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.183195 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjp2b\" (UniqueName: \"kubernetes.io/projected/ff583ed2-5f45-45fe-aa25-f02872f482b1-kube-api-access-rjp2b\") pod \"ff583ed2-5f45-45fe-aa25-f02872f482b1\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.183333 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-config\") pod \"ff583ed2-5f45-45fe-aa25-f02872f482b1\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.183403 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-swift-storage-0\") pod \"ff583ed2-5f45-45fe-aa25-f02872f482b1\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.183497 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-nb\") pod \"ff583ed2-5f45-45fe-aa25-f02872f482b1\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.183520 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-svc\") pod \"ff583ed2-5f45-45fe-aa25-f02872f482b1\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.183567 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-sb\") pod \"ff583ed2-5f45-45fe-aa25-f02872f482b1\" (UID: \"ff583ed2-5f45-45fe-aa25-f02872f482b1\") " Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.197890 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff583ed2-5f45-45fe-aa25-f02872f482b1-kube-api-access-rjp2b" (OuterVolumeSpecName: "kube-api-access-rjp2b") pod "ff583ed2-5f45-45fe-aa25-f02872f482b1" (UID: "ff583ed2-5f45-45fe-aa25-f02872f482b1"). InnerVolumeSpecName "kube-api-access-rjp2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.205384 4995 generic.go:334] "Generic (PLEG): container finished" podID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerID="b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a" exitCode=0 Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.205464 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" event={"ID":"ff583ed2-5f45-45fe-aa25-f02872f482b1","Type":"ContainerDied","Data":"b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a"} Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.205490 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" event={"ID":"ff583ed2-5f45-45fe-aa25-f02872f482b1","Type":"ContainerDied","Data":"ceda8149265e576b7154304fec6557dd148be6b67d46f2a288bb06e453d6900c"} Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.205508 4995 scope.go:117] "RemoveContainer" containerID="b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.205640 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-9jjtp" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.240025 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ff583ed2-5f45-45fe-aa25-f02872f482b1" (UID: "ff583ed2-5f45-45fe-aa25-f02872f482b1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.242734 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerStarted","Data":"f0525f519fbca6c22b729b91d5a8e776a9fdb99a7eeb6b16f8a156b4c888f7e5"} Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.250587 4995 scope.go:117] "RemoveContainer" containerID="c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.259990 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ff583ed2-5f45-45fe-aa25-f02872f482b1" (UID: "ff583ed2-5f45-45fe-aa25-f02872f482b1"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.261456 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ff583ed2-5f45-45fe-aa25-f02872f482b1" (UID: "ff583ed2-5f45-45fe-aa25-f02872f482b1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.262292 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ff583ed2-5f45-45fe-aa25-f02872f482b1" (UID: "ff583ed2-5f45-45fe-aa25-f02872f482b1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.274936 4995 scope.go:117] "RemoveContainer" containerID="b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a" Jan 20 16:51:10 crc kubenswrapper[4995]: E0120 16:51:10.277438 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a\": container with ID starting with b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a not found: ID does not exist" containerID="b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.277478 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a"} err="failed to get container status \"b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a\": rpc error: code = NotFound desc = could not find container \"b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a\": container with ID starting with b1331947d52ba6c2edceb85d421254ca0c80115cb1f819c70e05c33c1b1dd63a not found: ID does not exist" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.277505 4995 scope.go:117] "RemoveContainer" containerID="c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3" Jan 20 16:51:10 crc kubenswrapper[4995]: E0120 16:51:10.277884 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3\": container with ID starting with c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3 not found: ID does not exist" containerID="c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.277918 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3"} err="failed to get container status \"c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3\": rpc error: code = NotFound desc = could not find container \"c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3\": container with ID starting with c5dd69872837a6d0a66973aa70a8eebcd6d53c06ecc97ccb89027a3ae75a5de3 not found: ID does not exist" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.280603 4995 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-config" (OuterVolumeSpecName: "config") pod "ff583ed2-5f45-45fe-aa25-f02872f482b1" (UID: "ff583ed2-5f45-45fe-aa25-f02872f482b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.285658 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.285690 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.285702 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.285710 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.285719 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ff583ed2-5f45-45fe-aa25-f02872f482b1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.285727 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjp2b\" (UniqueName: \"kubernetes.io/projected/ff583ed2-5f45-45fe-aa25-f02872f482b1-kube-api-access-rjp2b\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.546412 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-9jjtp"] Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.557923 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-9jjtp"] Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.590254 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.643459 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-679d748c9b-mrbbx" Jan 20 16:51:10 crc kubenswrapper[4995]: I0120 16:51:10.954588 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:43366->10.217.0.162:8443: read: connection reset by peer" Jan 20 16:51:10 crc kubenswrapper[4995]: E0120 16:51:10.991969 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8c780d8_786b_4f35_a3ff_22b2e9081e1d.slice/crio-conmon-51972be83ed45c79182daef86f3655a4ff7cbbf055c6dee26b7249bc9b01d686.scope\": RecentStats: unable to find data in memory cache]" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.275999 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerID="b8e948adfe77d2cd9ea089ac1055ca05cf957452ed332421df4521eb39eda287" exitCode=0 Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.276356 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84df7dbffb-njbnq" event={"ID":"8e877da9-408f-40dd-8e4a-5173ba3d6988","Type":"ContainerDied","Data":"b8e948adfe77d2cd9ea089ac1055ca05cf957452ed332421df4521eb39eda287"} Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.280170 4995 generic.go:334] "Generic (PLEG): container finished" podID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerID="51972be83ed45c79182daef86f3655a4ff7cbbf055c6dee26b7249bc9b01d686" exitCode=137 Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.280284 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" event={"ID":"f8c780d8-786b-4f35-a3ff-22b2e9081e1d","Type":"ContainerDied","Data":"51972be83ed45c79182daef86f3655a4ff7cbbf055c6dee26b7249bc9b01d686"} Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.281167 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.312558 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.312542348 podStartE2EDuration="4.312542348s" podCreationTimestamp="2026-01-20 16:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:11.309331751 +0000 UTC m=+1189.553936557" watchObservedRunningTime="2026-01-20 16:51:11.312542348 +0000 UTC m=+1189.557147154" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.336397 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.436329 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-svc\") pod \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.436381 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-nb\") pod \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.436402 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-config\") pod \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.436442 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-sb\") pod \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.436477 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-swift-storage-0\") pod \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.436651 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n7tg5\" (UniqueName: \"kubernetes.io/projected/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-kube-api-access-n7tg5\") pod \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\" (UID: \"f8c780d8-786b-4f35-a3ff-22b2e9081e1d\") " Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.456643 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-kube-api-access-n7tg5" (OuterVolumeSpecName: "kube-api-access-n7tg5") pod "f8c780d8-786b-4f35-a3ff-22b2e9081e1d" (UID: "f8c780d8-786b-4f35-a3ff-22b2e9081e1d"). InnerVolumeSpecName "kube-api-access-n7tg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.539713 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n7tg5\" (UniqueName: \"kubernetes.io/projected/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-kube-api-access-n7tg5\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.645434 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f8c780d8-786b-4f35-a3ff-22b2e9081e1d" (UID: "f8c780d8-786b-4f35-a3ff-22b2e9081e1d"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.690526 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f8c780d8-786b-4f35-a3ff-22b2e9081e1d" (UID: "f8c780d8-786b-4f35-a3ff-22b2e9081e1d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.696497 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f8c780d8-786b-4f35-a3ff-22b2e9081e1d" (UID: "f8c780d8-786b-4f35-a3ff-22b2e9081e1d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.703313 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-config" (OuterVolumeSpecName: "config") pod "f8c780d8-786b-4f35-a3ff-22b2e9081e1d" (UID: "f8c780d8-786b-4f35-a3ff-22b2e9081e1d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.716707 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f8c780d8-786b-4f35-a3ff-22b2e9081e1d" (UID: "f8c780d8-786b-4f35-a3ff-22b2e9081e1d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.722738 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-674779b598-44vdg" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.743999 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.744041 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.744055 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.744066 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.744100 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f8c780d8-786b-4f35-a3ff-22b2e9081e1d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.805803 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-77d6d89bf8-nkl4h"] Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.806081 4995 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/barbican-api-77d6d89bf8-nkl4h" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api-log" containerID="cri-o://2daad3af49862ef1f64bb065a90201dbe0da8597c8ec895043af546d54ca691b" gracePeriod=30 Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.806609 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-77d6d89bf8-nkl4h" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api" containerID="cri-o://2ac38c99dfc243cd4ea3c9b9e60c3f1697ca7adfd761b65a95f3b120e0e2a962" gracePeriod=30 Jan 20 16:51:11 crc kubenswrapper[4995]: I0120 16:51:11.999432 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" path="/var/lib/kubelet/pods/ff583ed2-5f45-45fe-aa25-f02872f482b1/volumes" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.022384 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7f99b88f98-w6ztm" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.289995 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerStarted","Data":"46652561d3e77380839bf1829162c026f29cf74bfe54f1a795c45b01a62521bd"} Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.290232 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.291917 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.291932 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-qm6ct" event={"ID":"f8c780d8-786b-4f35-a3ff-22b2e9081e1d","Type":"ContainerDied","Data":"8332ff305aef87958a889aa83d6cd73460a254d545ece41c0ba05dabf2f02c6e"} Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.291966 4995 scope.go:117] "RemoveContainer" containerID="51972be83ed45c79182daef86f3655a4ff7cbbf055c6dee26b7249bc9b01d686" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.294459 4995 generic.go:334] "Generic (PLEG): container finished" podID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerID="2daad3af49862ef1f64bb065a90201dbe0da8597c8ec895043af546d54ca691b" exitCode=143 Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.294698 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77d6d89bf8-nkl4h" event={"ID":"3ce1f610-b53c-457f-89b8-d484d02bb210","Type":"ContainerDied","Data":"2daad3af49862ef1f64bb065a90201dbe0da8597c8ec895043af546d54ca691b"} Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.314637 4995 scope.go:117] "RemoveContainer" containerID="10bf7d88e3d5582b2f21c765a155109416c25575f05f0be64d87d1e33be7cfa7" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.321375 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.135743377 podStartE2EDuration="7.321353725s" podCreationTimestamp="2026-01-20 16:51:05 +0000 UTC" firstStartedPulling="2026-01-20 16:51:06.023194699 +0000 UTC m=+1184.267799505" lastFinishedPulling="2026-01-20 16:51:11.208805047 +0000 UTC m=+1189.453409853" observedRunningTime="2026-01-20 16:51:12.315464045 +0000 UTC m=+1190.560068861" watchObservedRunningTime="2026-01-20 16:51:12.321353725 +0000 UTC m=+1190.565958531" Jan 
20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.335977 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-qm6ct"] Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.344433 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-qm6ct"] Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.591631 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 20 16:51:12 crc kubenswrapper[4995]: E0120 16:51:12.592015 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerName="dnsmasq-dns" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592031 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerName="dnsmasq-dns" Jan 20 16:51:12 crc kubenswrapper[4995]: E0120 16:51:12.592042 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerName="dnsmasq-dns" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592048 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerName="dnsmasq-dns" Jan 20 16:51:12 crc kubenswrapper[4995]: E0120 16:51:12.592067 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerName="init" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592072 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerName="init" Jan 20 16:51:12 crc kubenswrapper[4995]: E0120 16:51:12.592112 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerName="init" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592118 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerName="init" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592310 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff583ed2-5f45-45fe-aa25-f02872f482b1" containerName="dnsmasq-dns" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592333 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" containerName="dnsmasq-dns" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.592968 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.598156 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-rqcwf" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.598170 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.598360 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.617134 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.661838 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82097345-279c-4f86-ad0d-29cd82acf859-combined-ca-bundle\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.661944 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/82097345-279c-4f86-ad0d-29cd82acf859-openstack-config\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.661998 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/82097345-279c-4f86-ad0d-29cd82acf859-openstack-config-secret\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.662032 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pmnv\" (UniqueName: \"kubernetes.io/projected/82097345-279c-4f86-ad0d-29cd82acf859-kube-api-access-8pmnv\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.762763 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/82097345-279c-4f86-ad0d-29cd82acf859-openstack-config\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.763122 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/82097345-279c-4f86-ad0d-29cd82acf859-openstack-config-secret\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.763274 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pmnv\" (UniqueName: \"kubernetes.io/projected/82097345-279c-4f86-ad0d-29cd82acf859-kube-api-access-8pmnv\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.763369 4995 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82097345-279c-4f86-ad0d-29cd82acf859-combined-ca-bundle\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.766294 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/82097345-279c-4f86-ad0d-29cd82acf859-openstack-config\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.770562 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82097345-279c-4f86-ad0d-29cd82acf859-combined-ca-bundle\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.770915 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/82097345-279c-4f86-ad0d-29cd82acf859-openstack-config-secret\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.786640 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pmnv\" (UniqueName: \"kubernetes.io/projected/82097345-279c-4f86-ad0d-29cd82acf859-kube-api-access-8pmnv\") pod \"openstackclient\" (UID: \"82097345-279c-4f86-ad0d-29cd82acf859\") " pod="openstack/openstackclient" Jan 20 16:51:12 crc kubenswrapper[4995]: I0120 16:51:12.913740 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 20 16:51:13 crc kubenswrapper[4995]: I0120 16:51:13.038991 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Jan 20 16:51:13 crc kubenswrapper[4995]: I0120 16:51:13.049222 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Jan 20 16:51:13 crc kubenswrapper[4995]: I0120 16:51:13.114382 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Jan 20 16:51:13 crc kubenswrapper[4995]: I0120 16:51:13.321279 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 20 16:51:13 crc kubenswrapper[4995]: I0120 16:51:13.409330 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.001317 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8c780d8-786b-4f35-a3ff-22b2e9081e1d" path="/var/lib/kubelet/pods/f8c780d8-786b-4f35-a3ff-22b2e9081e1d/volumes" Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.019242 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.060462 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.316845 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"82097345-279c-4f86-ad0d-29cd82acf859","Type":"ContainerStarted","Data":"b3a969c45252ac6a300a3d25c09be6887700e8f8a7067f5ecb4e6eb9523a9f07"} Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.317313 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="cinder-scheduler" containerID="cri-o://acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d" gracePeriod=30 Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.317422 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="probe" containerID="cri-o://3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3" gracePeriod=30 Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.996875 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-77d6d89bf8-nkl4h" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": read tcp 10.217.0.2:52910->10.217.0.178:9311: read: connection reset by peer" Jan 20 16:51:14 crc kubenswrapper[4995]: I0120 16:51:14.996916 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-77d6d89bf8-nkl4h" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": read tcp 10.217.0.2:52904->10.217.0.178:9311: read: connection reset by peer" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.343506 4995 
generic.go:334] "Generic (PLEG): container finished" podID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerID="2ac38c99dfc243cd4ea3c9b9e60c3f1697ca7adfd761b65a95f3b120e0e2a962" exitCode=0 Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.343581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77d6d89bf8-nkl4h" event={"ID":"3ce1f610-b53c-457f-89b8-d484d02bb210","Type":"ContainerDied","Data":"2ac38c99dfc243cd4ea3c9b9e60c3f1697ca7adfd761b65a95f3b120e0e2a962"} Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.345402 4995 generic.go:334] "Generic (PLEG): container finished" podID="b3000b30-37ee-4724-a13f-71f360b1af38" containerID="3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3" exitCode=0 Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.345436 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3000b30-37ee-4724-a13f-71f360b1af38","Type":"ContainerDied","Data":"3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3"} Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.508655 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77d6d89bf8-nkl4h" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.560385 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdhr8\" (UniqueName: \"kubernetes.io/projected/3ce1f610-b53c-457f-89b8-d484d02bb210-kube-api-access-tdhr8\") pod \"3ce1f610-b53c-457f-89b8-d484d02bb210\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.560564 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-combined-ca-bundle\") pod \"3ce1f610-b53c-457f-89b8-d484d02bb210\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.560627 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data-custom\") pod \"3ce1f610-b53c-457f-89b8-d484d02bb210\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.560682 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data\") pod \"3ce1f610-b53c-457f-89b8-d484d02bb210\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.560756 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ce1f610-b53c-457f-89b8-d484d02bb210-logs\") pod \"3ce1f610-b53c-457f-89b8-d484d02bb210\" (UID: \"3ce1f610-b53c-457f-89b8-d484d02bb210\") " Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.561574 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ce1f610-b53c-457f-89b8-d484d02bb210-logs" (OuterVolumeSpecName: "logs") pod "3ce1f610-b53c-457f-89b8-d484d02bb210" (UID: "3ce1f610-b53c-457f-89b8-d484d02bb210"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.572128 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3ce1f610-b53c-457f-89b8-d484d02bb210" (UID: "3ce1f610-b53c-457f-89b8-d484d02bb210"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.576532 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ce1f610-b53c-457f-89b8-d484d02bb210-kube-api-access-tdhr8" (OuterVolumeSpecName: "kube-api-access-tdhr8") pod "3ce1f610-b53c-457f-89b8-d484d02bb210" (UID: "3ce1f610-b53c-457f-89b8-d484d02bb210"). InnerVolumeSpecName "kube-api-access-tdhr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.609267 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ce1f610-b53c-457f-89b8-d484d02bb210" (UID: "3ce1f610-b53c-457f-89b8-d484d02bb210"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.645262 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data" (OuterVolumeSpecName: "config-data") pod "3ce1f610-b53c-457f-89b8-d484d02bb210" (UID: "3ce1f610-b53c-457f-89b8-d484d02bb210"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.663346 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.663381 4995 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.663390 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ce1f610-b53c-457f-89b8-d484d02bb210-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.663400 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ce1f610-b53c-457f-89b8-d484d02bb210-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.663411 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdhr8\" (UniqueName: \"kubernetes.io/projected/3ce1f610-b53c-457f-89b8-d484d02bb210-kube-api-access-tdhr8\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.829255 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.967525 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data-custom\") pod \"b3000b30-37ee-4724-a13f-71f360b1af38\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") "
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.967653 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-scripts\") pod \"b3000b30-37ee-4724-a13f-71f360b1af38\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") "
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.967688 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data\") pod \"b3000b30-37ee-4724-a13f-71f360b1af38\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") "
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.967759 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3000b30-37ee-4724-a13f-71f360b1af38-etc-machine-id\") pod \"b3000b30-37ee-4724-a13f-71f360b1af38\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") "
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.967792 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trmkf\" (UniqueName: \"kubernetes.io/projected/b3000b30-37ee-4724-a13f-71f360b1af38-kube-api-access-trmkf\") pod \"b3000b30-37ee-4724-a13f-71f360b1af38\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") "
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.967832 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-combined-ca-bundle\") pod \"b3000b30-37ee-4724-a13f-71f360b1af38\" (UID: \"b3000b30-37ee-4724-a13f-71f360b1af38\") "
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.969237 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b3000b30-37ee-4724-a13f-71f360b1af38-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b3000b30-37ee-4724-a13f-71f360b1af38" (UID: "b3000b30-37ee-4724-a13f-71f360b1af38"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.973286 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3000b30-37ee-4724-a13f-71f360b1af38-kube-api-access-trmkf" (OuterVolumeSpecName: "kube-api-access-trmkf") pod "b3000b30-37ee-4724-a13f-71f360b1af38" (UID: "b3000b30-37ee-4724-a13f-71f360b1af38"). InnerVolumeSpecName "kube-api-access-trmkf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.977602 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b3000b30-37ee-4724-a13f-71f360b1af38" (UID: "b3000b30-37ee-4724-a13f-71f360b1af38"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:15 crc kubenswrapper[4995]: I0120 16:51:15.980047 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-scripts" (OuterVolumeSpecName: "scripts") pod "b3000b30-37ee-4724-a13f-71f360b1af38" (UID: "b3000b30-37ee-4724-a13f-71f360b1af38"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.038375 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3000b30-37ee-4724-a13f-71f360b1af38" (UID: "b3000b30-37ee-4724-a13f-71f360b1af38"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.069701 4995 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b3000b30-37ee-4724-a13f-71f360b1af38-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.069729 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trmkf\" (UniqueName: \"kubernetes.io/projected/b3000b30-37ee-4724-a13f-71f360b1af38-kube-api-access-trmkf\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.069741 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.069749 4995 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.069758 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.118853 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data" (OuterVolumeSpecName: "config-data") pod "b3000b30-37ee-4724-a13f-71f360b1af38" (UID: "b3000b30-37ee-4724-a13f-71f360b1af38"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.171670 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3000b30-37ee-4724-a13f-71f360b1af38-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.360532 4995 generic.go:334] "Generic (PLEG): container finished" podID="b3000b30-37ee-4724-a13f-71f360b1af38" containerID="acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d" exitCode=0
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.360617 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.360654 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3000b30-37ee-4724-a13f-71f360b1af38","Type":"ContainerDied","Data":"acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d"}
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.361115 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b3000b30-37ee-4724-a13f-71f360b1af38","Type":"ContainerDied","Data":"73fd909ce02ef188ae3edb80d7f79d89b8372dee80b01abb728f5e92c5a15e36"}
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.361143 4995 scope.go:117] "RemoveContainer" containerID="3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.365190 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77d6d89bf8-nkl4h" event={"ID":"3ce1f610-b53c-457f-89b8-d484d02bb210","Type":"ContainerDied","Data":"949cc6adf0a0fffb57086a696658d65effd378b1027923e3666f322a9be444aa"}
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.365265 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77d6d89bf8-nkl4h"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.390293 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-77d6d89bf8-nkl4h"]
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.401167 4995 scope.go:117] "RemoveContainer" containerID="acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.404498 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-77d6d89bf8-nkl4h"]
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.417418 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.431596 4995 scope.go:117] "RemoveContainer" containerID="3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.431753 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:51:16 crc kubenswrapper[4995]: E0120 16:51:16.440167 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3\": container with ID starting with 3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3 not found: ID does not exist" containerID="3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.440227 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3"} err="failed to get container status \"3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3\": rpc error: code = NotFound desc = could not find container \"3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3\": container with ID starting with 3593f1426da9265b390d4132f826c9db5bf36f33dd3b4d19f293f6167fd275c3 not found: ID does not exist"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.440262 4995 scope.go:117] "RemoveContainer" containerID="acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d"
Jan 20 16:51:16 crc kubenswrapper[4995]: E0120 16:51:16.441774 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d\": container with ID starting with acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d not found: ID does not exist" containerID="acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.441795 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d"} err="failed to get container status \"acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d\": rpc error: code = NotFound desc = could not find container \"acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d\": container with ID starting with acae5014959d4a204a0851254ded939d82d754aa3af6a9db473d67a60b34004d not found: ID does not exist"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.441811 4995 scope.go:117] "RemoveContainer" containerID="2ac38c99dfc243cd4ea3c9b9e60c3f1697ca7adfd761b65a95f3b120e0e2a962"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.445365 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:51:16 crc kubenswrapper[4995]: E0120 16:51:16.445836 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="cinder-scheduler"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.445861 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="cinder-scheduler"
Jan 20 16:51:16 crc kubenswrapper[4995]: E0120 16:51:16.445875 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="probe"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.445883 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="probe"
Jan 20 16:51:16 crc kubenswrapper[4995]: E0120 16:51:16.445914 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.445924 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api"
Jan 20 16:51:16 crc kubenswrapper[4995]: E0120 16:51:16.445943 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api-log"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.445951 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api-log"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.446300 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.446323 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" containerName="barbican-api-log"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.446345 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="probe"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.446365 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" containerName="cinder-scheduler"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.447538 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.449877 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.464933 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.506888 4995 scope.go:117] "RemoveContainer" containerID="2daad3af49862ef1f64bb065a90201dbe0da8597c8ec895043af546d54ca691b"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.583629 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkhhw\" (UniqueName: \"kubernetes.io/projected/600e23cc-0af2-4f67-a17b-a69f4753f7f5-kube-api-access-tkhhw\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.583675 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/600e23cc-0af2-4f67-a17b-a69f4753f7f5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.583698 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.583761 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-config-data\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.583831 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.583851 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-scripts\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.686003 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkhhw\" (UniqueName: \"kubernetes.io/projected/600e23cc-0af2-4f67-a17b-a69f4753f7f5-kube-api-access-tkhhw\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.686055 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/600e23cc-0af2-4f67-a17b-a69f4753f7f5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.686077 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.686163 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-config-data\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.686186 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.686232 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-scripts\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.689448 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/600e23cc-0af2-4f67-a17b-a69f4753f7f5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.697433 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.699058 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.704784 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-scripts\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.705730 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkhhw\" (UniqueName: \"kubernetes.io/projected/600e23cc-0af2-4f67-a17b-a69f4753f7f5-kube-api-access-tkhhw\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.730258 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/600e23cc-0af2-4f67-a17b-a69f4753f7f5-config-data\") pod \"cinder-scheduler-0\" (UID: \"600e23cc-0af2-4f67-a17b-a69f4753f7f5\") " pod="openstack/cinder-scheduler-0"
Jan 20 16:51:16 crc kubenswrapper[4995]: I0120 16:51:16.793120 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.324607 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-z4fhw"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.326172 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.333008 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.344132 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-z4fhw"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.388405 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"600e23cc-0af2-4f67-a17b-a69f4753f7f5","Type":"ContainerStarted","Data":"98beea9ee2e19d7baf7aa8d54e91637fef02839fcfe3e99d42d3d58e948ce636"}
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.399100 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27b5167-a8db-434f-95f5-12d03504b42b-operator-scripts\") pod \"nova-api-db-create-z4fhw\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") " pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.399219 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2s6ph\" (UniqueName: \"kubernetes.io/projected/f27b5167-a8db-434f-95f5-12d03504b42b-kube-api-access-2s6ph\") pod \"nova-api-db-create-z4fhw\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") " pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.444811 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-x6v46"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.446403 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.455551 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-5951-account-create-update-gj2vm"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.456817 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.462290 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.484318 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-x6v46"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.499007 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-5951-account-create-update-gj2vm"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.502145 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8g58\" (UniqueName: \"kubernetes.io/projected/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-kube-api-access-s8g58\") pod \"nova-cell0-db-create-x6v46\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") " pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.502224 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27b5167-a8db-434f-95f5-12d03504b42b-operator-scripts\") pod \"nova-api-db-create-z4fhw\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") " pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.502251 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-operator-scripts\") pod \"nova-cell0-db-create-x6v46\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") " pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.502294 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db16d488-a0a4-4fd7-8662-c42fcd147308-operator-scripts\") pod \"nova-api-5951-account-create-update-gj2vm\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.502654 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvhzl\" (UniqueName: \"kubernetes.io/projected/db16d488-a0a4-4fd7-8662-c42fcd147308-kube-api-access-wvhzl\") pod \"nova-api-5951-account-create-update-gj2vm\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.502713 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2s6ph\" (UniqueName: \"kubernetes.io/projected/f27b5167-a8db-434f-95f5-12d03504b42b-kube-api-access-2s6ph\") pod \"nova-api-db-create-z4fhw\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") " pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.506461 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27b5167-a8db-434f-95f5-12d03504b42b-operator-scripts\") pod \"nova-api-db-create-z4fhw\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") " pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.525011 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-7z5wh"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.526189 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.539264 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-7z5wh"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.552289 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2s6ph\" (UniqueName: \"kubernetes.io/projected/f27b5167-a8db-434f-95f5-12d03504b42b-kube-api-access-2s6ph\") pod \"nova-api-db-create-z4fhw\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") " pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.607004 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcfp7\" (UniqueName: \"kubernetes.io/projected/845fe01d-392a-49a0-bfea-0270f2703739-kube-api-access-fcfp7\") pod \"nova-cell1-db-create-7z5wh\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.607120 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8g58\" (UniqueName: \"kubernetes.io/projected/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-kube-api-access-s8g58\") pod \"nova-cell0-db-create-x6v46\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") " pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.607172 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-operator-scripts\") pod \"nova-cell0-db-create-x6v46\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") " pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.607209 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db16d488-a0a4-4fd7-8662-c42fcd147308-operator-scripts\") pod \"nova-api-5951-account-create-update-gj2vm\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.607276 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/845fe01d-392a-49a0-bfea-0270f2703739-operator-scripts\") pod \"nova-cell1-db-create-7z5wh\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.607341 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvhzl\" (UniqueName: \"kubernetes.io/projected/db16d488-a0a4-4fd7-8662-c42fcd147308-kube-api-access-wvhzl\") pod \"nova-api-5951-account-create-update-gj2vm\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.608048 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-operator-scripts\") pod \"nova-cell0-db-create-x6v46\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") " pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.608786 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db16d488-a0a4-4fd7-8662-c42fcd147308-operator-scripts\") pod \"nova-api-5951-account-create-update-gj2vm\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.621777 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-d1ac-account-create-update-5v47x"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.623450 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.628508 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.630227 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvhzl\" (UniqueName: \"kubernetes.io/projected/db16d488-a0a4-4fd7-8662-c42fcd147308-kube-api-access-wvhzl\") pod \"nova-api-5951-account-create-update-gj2vm\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.631530 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8g58\" (UniqueName: \"kubernetes.io/projected/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-kube-api-access-s8g58\") pod \"nova-cell0-db-create-x6v46\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") " pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.634469 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-d1ac-account-create-update-5v47x"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.717609 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcfp7\" (UniqueName: \"kubernetes.io/projected/845fe01d-392a-49a0-bfea-0270f2703739-kube-api-access-fcfp7\") pod \"nova-cell1-db-create-7z5wh\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.717694 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-operator-scripts\") pod \"nova-cell0-d1ac-account-create-update-5v47x\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.717983 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnv2v\" (UniqueName: \"kubernetes.io/projected/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-kube-api-access-dnv2v\") pod \"nova-cell0-d1ac-account-create-update-5v47x\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.718150 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/845fe01d-392a-49a0-bfea-0270f2703739-operator-scripts\") pod \"nova-cell1-db-create-7z5wh\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.719066 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/845fe01d-392a-49a0-bfea-0270f2703739-operator-scripts\") pod \"nova-cell1-db-create-7z5wh\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.736617 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcfp7\" (UniqueName: \"kubernetes.io/projected/845fe01d-392a-49a0-bfea-0270f2703739-kube-api-access-fcfp7\") pod \"nova-cell1-db-create-7z5wh\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.776700 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.798031 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.819815 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnv2v\" (UniqueName: \"kubernetes.io/projected/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-kube-api-access-dnv2v\") pod \"nova-cell0-d1ac-account-create-update-5v47x\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.819959 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-operator-scripts\") pod \"nova-cell0-d1ac-account-create-update-5v47x\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.820582 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-operator-scripts\") pod \"nova-cell0-d1ac-account-create-update-5v47x\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.821357 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-d336-account-create-update-vv9vm"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.823539 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.824605 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5951-account-create-update-gj2vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.826674 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.832032 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d336-account-create-update-vv9vm"]
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.839558 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnv2v\" (UniqueName: \"kubernetes.io/projected/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-kube-api-access-dnv2v\") pod \"nova-cell0-d1ac-account-create-update-5v47x\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.916232 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.921484 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm9r8\" (UniqueName: \"kubernetes.io/projected/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-kube-api-access-sm9r8\") pod \"nova-cell1-d336-account-create-update-vv9vm\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.921575 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-operator-scripts\") pod \"nova-cell1-d336-account-create-update-vv9vm\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:17 crc kubenswrapper[4995]: I0120 16:51:17.944464 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.012954 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ce1f610-b53c-457f-89b8-d484d02bb210" path="/var/lib/kubelet/pods/3ce1f610-b53c-457f-89b8-d484d02bb210/volumes"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.013949 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3000b30-37ee-4724-a13f-71f360b1af38" path="/var/lib/kubelet/pods/b3000b30-37ee-4724-a13f-71f360b1af38/volumes"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.027241 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm9r8\" (UniqueName: \"kubernetes.io/projected/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-kube-api-access-sm9r8\") pod \"nova-cell1-d336-account-create-update-vv9vm\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.027331 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-operator-scripts\") pod \"nova-cell1-d336-account-create-update-vv9vm\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.028119 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-operator-scripts\") pod \"nova-cell1-d336-account-create-update-vv9vm\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.062852 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm9r8\" (UniqueName: \"kubernetes.io/projected/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-kube-api-access-sm9r8\") pod \"nova-cell1-d336-account-create-update-vv9vm\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.174699 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d336-account-create-update-vv9vm"
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.449155 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-z4fhw"]
Jan 20 16:51:18 crc kubenswrapper[4995]: W0120 16:51:18.512186 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf27b5167_a8db_434f_95f5_12d03504b42b.slice/crio-c96e241a022dfccfee68297fb8aaac4e8a52734a470a51302609056760b97064 WatchSource:0}: Error finding container c96e241a022dfccfee68297fb8aaac4e8a52734a470a51302609056760b97064: Status 404 returned error can't find the container with id c96e241a022dfccfee68297fb8aaac4e8a52734a470a51302609056760b97064
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.620908 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-x6v46"]
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.732884 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-7z5wh"]
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.879677 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-d1ac-account-create-update-5v47x"]
Jan 20 16:51:18 crc kubenswrapper[4995]: I0120 16:51:18.940369 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-5951-account-create-update-gj2vm"]
Jan 20 16:51:19 crc kubenswrapper[4995]: W0120 16:51:19.066358 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod782c8f7d_5d0a_4221_a7e4_73b8aab7d361.slice/crio-739191de7954306f86d1f7732b2bab6f2292827509e13cc3d02abe1d6d86981f WatchSource:0}: Error finding container 739191de7954306f86d1f7732b2bab6f2292827509e13cc3d02abe1d6d86981f: Status 404 returned error can't find the container with id 739191de7954306f86d1f7732b2bab6f2292827509e13cc3d02abe1d6d86981f
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.067076 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d336-account-create-update-vv9vm"]
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.422256 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x" event={"ID":"135aaf1f-990f-4d8b-bde7-32f2ddb702b4","Type":"ContainerStarted","Data":"20c5ba54f56ada7d427c38fad30bc8a763abf84f799b220f15b8e8fdaf593788"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.425384 4995 generic.go:334] "Generic (PLEG): container finished" podID="845fe01d-392a-49a0-bfea-0270f2703739" containerID="44c8c76c8ad789f687dc214079804e8eacb6b94af9d374dc7e2f430cdfcff7a4" exitCode=0
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.425469 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7z5wh" event={"ID":"845fe01d-392a-49a0-bfea-0270f2703739","Type":"ContainerDied","Data":"44c8c76c8ad789f687dc214079804e8eacb6b94af9d374dc7e2f430cdfcff7a4"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.425515 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7z5wh" event={"ID":"845fe01d-392a-49a0-bfea-0270f2703739","Type":"ContainerStarted","Data":"973978e97c8afd95c0bbe558cde97d2cfa1bc1a0132c570cca9c6d6cccc283c6"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.448695 4995 generic.go:334] "Generic (PLEG): container finished" podID="a4c73453-c3fc-46b3-8a8a-ddd134348b8e" containerID="d61e58090310c325efb19963312ec469e053d810d08cf0d420d70429a17f9d8b" exitCode=0
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.448785 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-x6v46" event={"ID":"a4c73453-c3fc-46b3-8a8a-ddd134348b8e","Type":"ContainerDied","Data":"d61e58090310c325efb19963312ec469e053d810d08cf0d420d70429a17f9d8b"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.448810 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-x6v46" event={"ID":"a4c73453-c3fc-46b3-8a8a-ddd134348b8e","Type":"ContainerStarted","Data":"36f268d78f7fd2b5680fea763724941110a9b2bf3ad120997cdbef6d1c5db853"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.452843 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d336-account-create-update-vv9vm" event={"ID":"782c8f7d-5d0a-4221-a7e4-73b8aab7d361","Type":"ContainerStarted","Data":"739191de7954306f86d1f7732b2bab6f2292827509e13cc3d02abe1d6d86981f"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.454123 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"600e23cc-0af2-4f67-a17b-a69f4753f7f5","Type":"ContainerStarted","Data":"b592daad8f6e6b466d949b1ca7626e220b4e5e8caa8f0b114effb6599cbf732e"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.455154 4995 generic.go:334] "Generic (PLEG): container finished" podID="f27b5167-a8db-434f-95f5-12d03504b42b" containerID="3ae81eea5613b74406b7f39c19ae3c6d1485495d5904291e06cf17c9b57a240e" exitCode=0
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.455204 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-z4fhw" event={"ID":"f27b5167-a8db-434f-95f5-12d03504b42b","Type":"ContainerDied","Data":"3ae81eea5613b74406b7f39c19ae3c6d1485495d5904291e06cf17c9b57a240e"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.455219 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-z4fhw" event={"ID":"f27b5167-a8db-434f-95f5-12d03504b42b","Type":"ContainerStarted","Data":"c96e241a022dfccfee68297fb8aaac4e8a52734a470a51302609056760b97064"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.459876 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5951-account-create-update-gj2vm" event={"ID":"db16d488-a0a4-4fd7-8662-c42fcd147308","Type":"ContainerStarted","Data":"4ace6057861f428f1cf2f72dbc35e89f8a2c60376bd2afb1a87773261047e2bb"}
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.657787 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6f5d884999-jxjqt"]
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.659480 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.664777 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.664884 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.664999 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.674190 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6f5d884999-jxjqt"]
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816600 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-internal-tls-certs\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816649 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6da8401d-a15a-4ff6-ab0f-11cbafff0855-log-httpd\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816694 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-combined-ca-bundle\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816731 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-config-data\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816770 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6da8401d-a15a-4ff6-ab0f-11cbafff0855-etc-swift\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816789 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6da8401d-a15a-4ff6-ab0f-11cbafff0855-run-httpd\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816833 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dswq\" (UniqueName: \"kubernetes.io/projected/6da8401d-a15a-4ff6-ab0f-11cbafff0855-kube-api-access-7dswq\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.816930 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-public-tls-certs\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918660 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-config-data\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918720 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6da8401d-a15a-4ff6-ab0f-11cbafff0855-etc-swift\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918740 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6da8401d-a15a-4ff6-ab0f-11cbafff0855-run-httpd\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918773 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dswq\" (UniqueName: \"kubernetes.io/projected/6da8401d-a15a-4ff6-ab0f-11cbafff0855-kube-api-access-7dswq\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918862 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-public-tls-certs\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918895 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-internal-tls-certs\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918913 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6da8401d-a15a-4ff6-ab0f-11cbafff0855-log-httpd\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.918942 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-combined-ca-bundle\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.920968 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6da8401d-a15a-4ff6-ab0f-11cbafff0855-run-httpd\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.921512 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6da8401d-a15a-4ff6-ab0f-11cbafff0855-log-httpd\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.927983 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-combined-ca-bundle\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.928051 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-internal-tls-certs\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.928525 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-public-tls-certs\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.929314 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6da8401d-a15a-4ff6-ab0f-11cbafff0855-etc-swift\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.929544 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6da8401d-a15a-4ff6-ab0f-11cbafff0855-config-data\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.941940 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dswq\" (UniqueName: \"kubernetes.io/projected/6da8401d-a15a-4ff6-ab0f-11cbafff0855-kube-api-access-7dswq\") pod \"swift-proxy-6f5d884999-jxjqt\" (UID: \"6da8401d-a15a-4ff6-ab0f-11cbafff0855\") " pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:19 crc kubenswrapper[4995]: I0120 16:51:19.982258 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.474647 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"600e23cc-0af2-4f67-a17b-a69f4753f7f5","Type":"ContainerStarted","Data":"9033d6f4400582935f95d4679864bcc1d3e2c5ac479844a9b0300c53e7294c03"}
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.482089 4995 generic.go:334] "Generic (PLEG): container finished" podID="db16d488-a0a4-4fd7-8662-c42fcd147308" containerID="32f5af5984a6bd6bbe66712a3b4d52b4fb3364a2f23bacb8fe8ebb7cec35b538" exitCode=0
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.482200 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5951-account-create-update-gj2vm" event={"ID":"db16d488-a0a4-4fd7-8662-c42fcd147308","Type":"ContainerDied","Data":"32f5af5984a6bd6bbe66712a3b4d52b4fb3364a2f23bacb8fe8ebb7cec35b538"}
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.484219 4995 generic.go:334] "Generic (PLEG): container finished" podID="135aaf1f-990f-4d8b-bde7-32f2ddb702b4" containerID="f215ae9fb06e81304fa3500eca8d311d65749c7a2f1e93f9cfcbc6e929427adf" exitCode=0
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.484285 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x" event={"ID":"135aaf1f-990f-4d8b-bde7-32f2ddb702b4","Type":"ContainerDied","Data":"f215ae9fb06e81304fa3500eca8d311d65749c7a2f1e93f9cfcbc6e929427adf"}
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.485814 4995 generic.go:334] "Generic (PLEG): container finished" podID="782c8f7d-5d0a-4221-a7e4-73b8aab7d361" containerID="6737e76540a1b334227e803b83de5ce933e9cdb414ee98a368a83481c3da1d48" exitCode=0
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.486031 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d336-account-create-update-vv9vm" event={"ID":"782c8f7d-5d0a-4221-a7e4-73b8aab7d361","Type":"ContainerDied","Data":"6737e76540a1b334227e803b83de5ce933e9cdb414ee98a368a83481c3da1d48"}
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.489588 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.489571994 podStartE2EDuration="4.489571994s" podCreationTimestamp="2026-01-20 16:51:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:20.487771275 +0000 UTC m=+1198.732376091" watchObservedRunningTime="2026-01-20 16:51:20.489571994 +0000 UTC m=+1198.734176800"
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.619715 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6f5d884999-jxjqt"]
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.644396 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 20 16:51:20 crc kubenswrapper[4995]: I0120 16:51:20.960471 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6v46"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.051904 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-operator-scripts\") pod \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") "
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.052002 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8g58\" (UniqueName: \"kubernetes.io/projected/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-kube-api-access-s8g58\") pod \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\" (UID: \"a4c73453-c3fc-46b3-8a8a-ddd134348b8e\") "
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.053206 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4c73453-c3fc-46b3-8a8a-ddd134348b8e" (UID: "a4c73453-c3fc-46b3-8a8a-ddd134348b8e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.053419 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.069359 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-kube-api-access-s8g58" (OuterVolumeSpecName: "kube-api-access-s8g58") pod "a4c73453-c3fc-46b3-8a8a-ddd134348b8e" (UID: "a4c73453-c3fc-46b3-8a8a-ddd134348b8e"). InnerVolumeSpecName "kube-api-access-s8g58". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.157556 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8g58\" (UniqueName: \"kubernetes.io/projected/a4c73453-c3fc-46b3-8a8a-ddd134348b8e-kube-api-access-s8g58\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.377200 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.396253 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-7z5wh"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.464866 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2s6ph\" (UniqueName: \"kubernetes.io/projected/f27b5167-a8db-434f-95f5-12d03504b42b-kube-api-access-2s6ph\") pod \"f27b5167-a8db-434f-95f5-12d03504b42b\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") "
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.465150 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27b5167-a8db-434f-95f5-12d03504b42b-operator-scripts\") pod \"f27b5167-a8db-434f-95f5-12d03504b42b\" (UID: \"f27b5167-a8db-434f-95f5-12d03504b42b\") "
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.473012 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f27b5167-a8db-434f-95f5-12d03504b42b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f27b5167-a8db-434f-95f5-12d03504b42b" (UID: "f27b5167-a8db-434f-95f5-12d03504b42b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.502995 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.503267 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-central-agent" containerID="cri-o://c0ac39e33abbf83c5bba650b4b43570ca2c39a8140935e0a296726ed2caf4666" gracePeriod=30
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.503361 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f27b5167-a8db-434f-95f5-12d03504b42b-kube-api-access-2s6ph" (OuterVolumeSpecName: "kube-api-access-2s6ph") pod "f27b5167-a8db-434f-95f5-12d03504b42b" (UID: "f27b5167-a8db-434f-95f5-12d03504b42b"). InnerVolumeSpecName "kube-api-access-2s6ph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.503447 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="sg-core" containerID="cri-o://f0525f519fbca6c22b729b91d5a8e776a9fdb99a7eeb6b16f8a156b4c888f7e5" gracePeriod=30
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.503539 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-notification-agent" containerID="cri-o://44e4ae161bb59b4e914f8e50005b6876716285b0fa11f2c3e7bb0c4c54f4dbe5" gracePeriod=30
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.503593 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="proxy-httpd" containerID="cri-o://46652561d3e77380839bf1829162c026f29cf74bfe54f1a795c45b01a62521bd" gracePeriod=30
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.529847 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-z4fhw" event={"ID":"f27b5167-a8db-434f-95f5-12d03504b42b","Type":"ContainerDied","Data":"c96e241a022dfccfee68297fb8aaac4e8a52734a470a51302609056760b97064"}
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.529886 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c96e241a022dfccfee68297fb8aaac4e8a52734a470a51302609056760b97064"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.529976 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-z4fhw"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.547687 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6f5d884999-jxjqt" event={"ID":"6da8401d-a15a-4ff6-ab0f-11cbafff0855","Type":"ContainerStarted","Data":"f8a7e9e0106591d78eb0b12ef39fb0d2ae76d27c3c319705acddba5849660770"}
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.547729 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6f5d884999-jxjqt" event={"ID":"6da8401d-a15a-4ff6-ab0f-11cbafff0855","Type":"ContainerStarted","Data":"c0a51683f822828ebf3f61ef21fd1d675989e24d51bf460682ce1e96a6b6a56f"}
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.548897 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.548923 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6f5d884999-jxjqt"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.551164 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-7z5wh" event={"ID":"845fe01d-392a-49a0-bfea-0270f2703739","Type":"ContainerDied","Data":"973978e97c8afd95c0bbe558cde97d2cfa1bc1a0132c570cca9c6d6cccc283c6"}
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.551188 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="973978e97c8afd95c0bbe558cde97d2cfa1bc1a0132c570cca9c6d6cccc283c6"
Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.551232 4995 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-cell1-db-create-7z5wh" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.552736 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-x6v46" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.555272 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-x6v46" event={"ID":"a4c73453-c3fc-46b3-8a8a-ddd134348b8e","Type":"ContainerDied","Data":"36f268d78f7fd2b5680fea763724941110a9b2bf3ad120997cdbef6d1c5db853"} Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.555317 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36f268d78f7fd2b5680fea763724941110a9b2bf3ad120997cdbef6d1c5db853" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.566801 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcfp7\" (UniqueName: \"kubernetes.io/projected/845fe01d-392a-49a0-bfea-0270f2703739-kube-api-access-fcfp7\") pod \"845fe01d-392a-49a0-bfea-0270f2703739\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.566838 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/845fe01d-392a-49a0-bfea-0270f2703739-operator-scripts\") pod \"845fe01d-392a-49a0-bfea-0270f2703739\" (UID: \"845fe01d-392a-49a0-bfea-0270f2703739\") " Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.567851 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2s6ph\" (UniqueName: \"kubernetes.io/projected/f27b5167-a8db-434f-95f5-12d03504b42b-kube-api-access-2s6ph\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.567868 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f27b5167-a8db-434f-95f5-12d03504b42b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.568801 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/845fe01d-392a-49a0-bfea-0270f2703739-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "845fe01d-392a-49a0-bfea-0270f2703739" (UID: "845fe01d-392a-49a0-bfea-0270f2703739"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.590937 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/845fe01d-392a-49a0-bfea-0270f2703739-kube-api-access-fcfp7" (OuterVolumeSpecName: "kube-api-access-fcfp7") pod "845fe01d-392a-49a0-bfea-0270f2703739" (UID: "845fe01d-392a-49a0-bfea-0270f2703739"). InnerVolumeSpecName "kube-api-access-fcfp7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.617855 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.184:3000/\": read tcp 10.217.0.2:41158->10.217.0.184:3000: read: connection reset by peer" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.672826 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcfp7\" (UniqueName: \"kubernetes.io/projected/845fe01d-392a-49a0-bfea-0270f2703739-kube-api-access-fcfp7\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.672854 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/845fe01d-392a-49a0-bfea-0270f2703739-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:21 crc kubenswrapper[4995]: I0120 16:51:21.796614 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.022328 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6f5d884999-jxjqt" podStartSLOduration=3.022312637 podStartE2EDuration="3.022312637s" podCreationTimestamp="2026-01-20 16:51:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:21.591097953 +0000 UTC m=+1199.835702759" watchObservedRunningTime="2026-01-20 16:51:22.022312637 +0000 UTC m=+1200.266917443" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.260775 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-5951-account-create-update-gj2vm" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.266382 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.272692 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d336-account-create-update-vv9vm" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.442191 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-operator-scripts\") pod \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.442482 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm9r8\" (UniqueName: \"kubernetes.io/projected/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-kube-api-access-sm9r8\") pod \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.442626 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db16d488-a0a4-4fd7-8662-c42fcd147308-operator-scripts\") pod \"db16d488-a0a4-4fd7-8662-c42fcd147308\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.442788 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnv2v\" (UniqueName: \"kubernetes.io/projected/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-kube-api-access-dnv2v\") pod \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\" (UID: \"135aaf1f-990f-4d8b-bde7-32f2ddb702b4\") " Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.442855 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-operator-scripts\") pod \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\" (UID: \"782c8f7d-5d0a-4221-a7e4-73b8aab7d361\") " Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.442998 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvhzl\" (UniqueName: \"kubernetes.io/projected/db16d488-a0a4-4fd7-8662-c42fcd147308-kube-api-access-wvhzl\") pod \"db16d488-a0a4-4fd7-8662-c42fcd147308\" (UID: \"db16d488-a0a4-4fd7-8662-c42fcd147308\") " Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.443097 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "135aaf1f-990f-4d8b-bde7-32f2ddb702b4" (UID: "135aaf1f-990f-4d8b-bde7-32f2ddb702b4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.443564 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db16d488-a0a4-4fd7-8662-c42fcd147308-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "db16d488-a0a4-4fd7-8662-c42fcd147308" (UID: "db16d488-a0a4-4fd7-8662-c42fcd147308"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.444605 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "782c8f7d-5d0a-4221-a7e4-73b8aab7d361" (UID: "782c8f7d-5d0a-4221-a7e4-73b8aab7d361"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.445564 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db16d488-a0a4-4fd7-8662-c42fcd147308-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.445729 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.445833 4995 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.454000 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db16d488-a0a4-4fd7-8662-c42fcd147308-kube-api-access-wvhzl" (OuterVolumeSpecName: "kube-api-access-wvhzl") pod "db16d488-a0a4-4fd7-8662-c42fcd147308" (UID: "db16d488-a0a4-4fd7-8662-c42fcd147308"). InnerVolumeSpecName "kube-api-access-wvhzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.453991 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-kube-api-access-sm9r8" (OuterVolumeSpecName: "kube-api-access-sm9r8") pod "782c8f7d-5d0a-4221-a7e4-73b8aab7d361" (UID: "782c8f7d-5d0a-4221-a7e4-73b8aab7d361"). InnerVolumeSpecName "kube-api-access-sm9r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.454401 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-kube-api-access-dnv2v" (OuterVolumeSpecName: "kube-api-access-dnv2v") pod "135aaf1f-990f-4d8b-bde7-32f2ddb702b4" (UID: "135aaf1f-990f-4d8b-bde7-32f2ddb702b4"). InnerVolumeSpecName "kube-api-access-dnv2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.547307 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnv2v\" (UniqueName: \"kubernetes.io/projected/135aaf1f-990f-4d8b-bde7-32f2ddb702b4-kube-api-access-dnv2v\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.547341 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvhzl\" (UniqueName: \"kubernetes.io/projected/db16d488-a0a4-4fd7-8662-c42fcd147308-kube-api-access-wvhzl\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.547354 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm9r8\" (UniqueName: \"kubernetes.io/projected/782c8f7d-5d0a-4221-a7e4-73b8aab7d361-kube-api-access-sm9r8\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.563008 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.563004 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d1ac-account-create-update-5v47x" event={"ID":"135aaf1f-990f-4d8b-bde7-32f2ddb702b4","Type":"ContainerDied","Data":"20c5ba54f56ada7d427c38fad30bc8a763abf84f799b220f15b8e8fdaf593788"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.563074 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20c5ba54f56ada7d427c38fad30bc8a763abf84f799b220f15b8e8fdaf593788" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.565900 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6f5d884999-jxjqt" event={"ID":"6da8401d-a15a-4ff6-ab0f-11cbafff0855","Type":"ContainerStarted","Data":"3e6f7588cc7d74f0db2052626c1689f6958690402935508d2839b16862487f79"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.568882 4995 generic.go:334] "Generic (PLEG): container finished" podID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerID="46652561d3e77380839bf1829162c026f29cf74bfe54f1a795c45b01a62521bd" exitCode=0 Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.568914 4995 generic.go:334] "Generic (PLEG): container finished" podID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerID="f0525f519fbca6c22b729b91d5a8e776a9fdb99a7eeb6b16f8a156b4c888f7e5" exitCode=2 Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.568926 4995 generic.go:334] "Generic (PLEG): container finished" podID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerID="c0ac39e33abbf83c5bba650b4b43570ca2c39a8140935e0a296726ed2caf4666" exitCode=0 Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.568971 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerDied","Data":"46652561d3e77380839bf1829162c026f29cf74bfe54f1a795c45b01a62521bd"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.568998 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerDied","Data":"f0525f519fbca6c22b729b91d5a8e776a9fdb99a7eeb6b16f8a156b4c888f7e5"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.569013 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerDied","Data":"c0ac39e33abbf83c5bba650b4b43570ca2c39a8140935e0a296726ed2caf4666"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.570886 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d336-account-create-update-vv9vm" event={"ID":"782c8f7d-5d0a-4221-a7e4-73b8aab7d361","Type":"ContainerDied","Data":"739191de7954306f86d1f7732b2bab6f2292827509e13cc3d02abe1d6d86981f"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.570930 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="739191de7954306f86d1f7732b2bab6f2292827509e13cc3d02abe1d6d86981f" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.570931 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d336-account-create-update-vv9vm" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.574600 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-5951-account-create-update-gj2vm" Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.575906 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-5951-account-create-update-gj2vm" event={"ID":"db16d488-a0a4-4fd7-8662-c42fcd147308","Type":"ContainerDied","Data":"4ace6057861f428f1cf2f72dbc35e89f8a2c60376bd2afb1a87773261047e2bb"} Jan 20 16:51:22 crc kubenswrapper[4995]: I0120 16:51:22.575944 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ace6057861f428f1cf2f72dbc35e89f8a2c60376bd2afb1a87773261047e2bb" Jan 20 16:51:23 crc kubenswrapper[4995]: I0120 16:51:23.114766 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Jan 20 16:51:25 crc kubenswrapper[4995]: I0120 16:51:25.600494 4995 generic.go:334] "Generic (PLEG): container finished" podID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerID="44e4ae161bb59b4e914f8e50005b6876716285b0fa11f2c3e7bb0c4c54f4dbe5" exitCode=0 Jan 20 16:51:25 crc kubenswrapper[4995]: I0120 16:51:25.600535 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerDied","Data":"44e4ae161bb59b4e914f8e50005b6876716285b0fa11f2c3e7bb0c4c54f4dbe5"} Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.017247 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.634393 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.634593 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="8ac1d023-57b0-4926-9511-200b094b70f7" containerName="watcher-decision-engine" containerID="cri-o://ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc" gracePeriod=30 Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918122 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5c6sb"] Jan 20 16:51:27 crc kubenswrapper[4995]: E0120 16:51:27.918488 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db16d488-a0a4-4fd7-8662-c42fcd147308" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918516 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="db16d488-a0a4-4fd7-8662-c42fcd147308" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: E0120 16:51:27.918530 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4c73453-c3fc-46b3-8a8a-ddd134348b8e" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918535 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4c73453-c3fc-46b3-8a8a-ddd134348b8e" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: E0120 16:51:27.918543 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f27b5167-a8db-434f-95f5-12d03504b42b" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918551 
4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f27b5167-a8db-434f-95f5-12d03504b42b" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: E0120 16:51:27.918561 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="782c8f7d-5d0a-4221-a7e4-73b8aab7d361" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918567 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="782c8f7d-5d0a-4221-a7e4-73b8aab7d361" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: E0120 16:51:27.918576 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="845fe01d-392a-49a0-bfea-0270f2703739" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918581 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="845fe01d-392a-49a0-bfea-0270f2703739" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: E0120 16:51:27.918605 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="135aaf1f-990f-4d8b-bde7-32f2ddb702b4" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918610 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="135aaf1f-990f-4d8b-bde7-32f2ddb702b4" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918812 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="135aaf1f-990f-4d8b-bde7-32f2ddb702b4" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918832 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f27b5167-a8db-434f-95f5-12d03504b42b" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918842 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="845fe01d-392a-49a0-bfea-0270f2703739" containerName="mariadb-database-create" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918855 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="782c8f7d-5d0a-4221-a7e4-73b8aab7d361" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918871 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="db16d488-a0a4-4fd7-8662-c42fcd147308" containerName="mariadb-account-create-update" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.918881 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4c73453-c3fc-46b3-8a8a-ddd134348b8e" containerName="mariadb-database-create"
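
Graceful shutdowns in this log, such as ceilometer-0's four containers above and watcher-decision-engine-0, start with a kuberuntime_container.go "Killing container with a grace period" entry (gracePeriod=30) and end with a PLEG "container finished" event carrying the exit code. Pairing the two on containerID gives the time each container actually took to stop. A minimal sketch under those assumptions (kubelet.log is an assumed file name; entries are split on the klog header so that lines holding several entries are tolerated):

#!/usr/bin/env python3
"""Pair each "Killing container with a grace period" with the PLEG
"container finished" event for the same containerID and report how long
the stop actually took versus the grace period. Minimal sketch;
kubelet.log is an assumed file name."""
import re
from datetime import datetime

# klog entries open with a severity letter plus MMDD HH:MM:SS.micros;
# splitting on that header also copes with several entries on one line.
HEADER = re.compile(r'(?=[IEW]\d{4} \d{2}:\d{2}:\d{2}\.\d{6} )')
TS = re.compile(r'^[IEW](\d{4} \d{2}:\d{2}:\d{2}\.\d{6})')
KILL = re.compile(r'"Killing container with a grace period".*containerID="cri-o://([0-9a-f]+)".*gracePeriod=(\d+)')
DONE = re.compile(r'"Generic \(PLEG\): container finished".*containerID="([0-9a-f]+)" exitCode=(-?\d+)')

def stamp(entry):
    # Year is absent in klog headers; only time deltas are used here.
    return datetime.strptime(TS.match(entry).group(1), "%m%d %H:%M:%S.%f")

def main(path="kubelet.log"):
    kills = {}  # containerID -> (time the kill was issued, grace period seconds)
    for entry in HEADER.split(open(path, errors="replace").read()):
        if not TS.match(entry):
            continue
        if (m := KILL.search(entry)):
            kills[m.group(1)] = (stamp(entry), int(m.group(2)))
        elif (m := DONE.search(entry)) and m.group(1) in kills:
            issued, grace = kills.pop(m.group(1))
            took = (stamp(entry) - issued).total_seconds()
            print(f"{m.group(1)[:12]}  exit={m.group(2)}  stopped in {took:.3f}s (grace {grace}s)")

if __name__ == "__main__":
    main()

Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.919553 4995 util.go:30] "No sandbox for pod can be found. 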
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.921910 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.924445 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-s86mn" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.925240 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.937064 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5c6sb"] Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.951835 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.952092 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-config-data\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.952275 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-scripts\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:27 crc kubenswrapper[4995]: I0120 16:51:27.952349 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s2h9\" (UniqueName: \"kubernetes.io/projected/6f580cd7-fea0-4fb0-a858-74e2deb15c87-kube-api-access-7s2h9\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.058124 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-scripts\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.058179 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s2h9\" (UniqueName: \"kubernetes.io/projected/6f580cd7-fea0-4fb0-a858-74e2deb15c87-kube-api-access-7s2h9\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.058246 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: 
\"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.058291 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-config-data\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.064790 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-config-data\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.065419 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-scripts\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.066837 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.077139 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s2h9\" (UniqueName: \"kubernetes.io/projected/6f580cd7-fea0-4fb0-a858-74e2deb15c87-kube-api-access-7s2h9\") pod \"nova-cell0-conductor-db-sync-5c6sb\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:28 crc kubenswrapper[4995]: I0120 16:51:28.238468 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.443097 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.482764 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-scripts\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.482818 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-combined-ca-bundle\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.482926 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-log-httpd\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.482974 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-sg-core-conf-yaml\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.483023 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tldmx\" (UniqueName: \"kubernetes.io/projected/1f72d75c-837a-45db-9879-b2fcda5ff029-kube-api-access-tldmx\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.483531 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.483049 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-run-httpd\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.483855 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-config-data\") pod \"1f72d75c-837a-45db-9879-b2fcda5ff029\" (UID: \"1f72d75c-837a-45db-9879-b2fcda5ff029\") " Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.484151 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.484227 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.491474 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f72d75c-837a-45db-9879-b2fcda5ff029-kube-api-access-tldmx" (OuterVolumeSpecName: "kube-api-access-tldmx") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "kube-api-access-tldmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.499290 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-scripts" (OuterVolumeSpecName: "scripts") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.581829 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.585551 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1f72d75c-837a-45db-9879-b2fcda5ff029-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.585574 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.585587 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tldmx\" (UniqueName: \"kubernetes.io/projected/1f72d75c-837a-45db-9879-b2fcda5ff029-kube-api-access-tldmx\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.585598 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.595911 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5c6sb"] Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.619246 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.641222 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1f72d75c-837a-45db-9879-b2fcda5ff029","Type":"ContainerDied","Data":"0d5331e02d7462fd81beb9148cd57105a89be33b43352e6313bbaeb9496c242e"} Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.641288 4995 scope.go:117] "RemoveContainer" containerID="46652561d3e77380839bf1829162c026f29cf74bfe54f1a795c45b01a62521bd" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.641438 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.644354 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" event={"ID":"6f580cd7-fea0-4fb0-a858-74e2deb15c87","Type":"ContainerStarted","Data":"179a552976edea38e064f45b8a16c48b1f2f79e8cab6d8a1ab000c50876b6c73"} Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.645890 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"82097345-279c-4f86-ad0d-29cd82acf859","Type":"ContainerStarted","Data":"295ad30641a7926116b35a4110b8b391e5bc414e2f3788d5ca4bb9456656fb12"} Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.661619 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-config-data" (OuterVolumeSpecName: "config-data") pod "1f72d75c-837a-45db-9879-b2fcda5ff029" (UID: "1f72d75c-837a-45db-9879-b2fcda5ff029"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.667017 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.134652559 podStartE2EDuration="17.66699382s" podCreationTimestamp="2026-01-20 16:51:12 +0000 UTC" firstStartedPulling="2026-01-20 16:51:13.421704871 +0000 UTC m=+1191.666309687" lastFinishedPulling="2026-01-20 16:51:28.954046142 +0000 UTC m=+1207.198650948" observedRunningTime="2026-01-20 16:51:29.664291357 +0000 UTC m=+1207.908896163" watchObservedRunningTime="2026-01-20 16:51:29.66699382 +0000 UTC m=+1207.911598626" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.670223 4995 scope.go:117] "RemoveContainer" containerID="f0525f519fbca6c22b729b91d5a8e776a9fdb99a7eeb6b16f8a156b4c888f7e5" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.686241 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.686267 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f72d75c-837a-45db-9879-b2fcda5ff029-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.686439 4995 scope.go:117] "RemoveContainer" containerID="44e4ae161bb59b4e914f8e50005b6876716285b0fa11f2c3e7bb0c4c54f4dbe5" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.711130 4995 scope.go:117] "RemoveContainer" containerID="c0ac39e33abbf83c5bba650b4b43570ca2c39a8140935e0a296726ed2caf4666" Jan 20 16:51:29 crc kubenswrapper[4995]: I0120 16:51:29.983584 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ceilometer-0"] Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.002346 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.005685 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6f5d884999-jxjqt" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008027 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:30 crc kubenswrapper[4995]: E0120 16:51:30.008479 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-notification-agent" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008501 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-notification-agent" Jan 20 16:51:30 crc kubenswrapper[4995]: E0120 16:51:30.008524 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="sg-core" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008532 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="sg-core" Jan 20 16:51:30 crc kubenswrapper[4995]: E0120 16:51:30.008548 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="proxy-httpd" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008556 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="proxy-httpd" Jan 20 16:51:30 crc kubenswrapper[4995]: E0120 16:51:30.008580 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-central-agent" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008591 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-central-agent" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008794 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="proxy-httpd" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008811 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="sg-core" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008825 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-central-agent" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.008836 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" containerName="ceilometer-notification-agent" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.010519 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6f5d884999-jxjqt" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.010607 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.013946 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.014117 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.027238 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.085130 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:30 crc kubenswrapper[4995]: E0120 16:51:30.085795 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-dhjhm log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[combined-ca-bundle config-data kube-api-access-dhjhm log-httpd run-httpd scripts sg-core-conf-yaml]: context canceled" pod="openstack/ceilometer-0" podUID="21548fd1-1c5e-4bcb-bd01-1fed3f42e115" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.098557 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-run-httpd\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.098689 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-scripts\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.098922 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.099004 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.099035 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-log-httpd\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.099166 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-config-data\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.099189 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-dhjhm\" (UniqueName: \"kubernetes.io/projected/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-kube-api-access-dhjhm\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203155 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203227 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203247 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-log-httpd\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203301 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-config-data\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203320 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhjhm\" (UniqueName: \"kubernetes.io/projected/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-kube-api-access-dhjhm\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203355 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-run-httpd\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203386 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-scripts\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203921 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-run-httpd\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.203969 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-log-httpd\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.207856 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-scripts\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.208261 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-config-data\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.210768 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.226720 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.227466 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhjhm\" (UniqueName: \"kubernetes.io/projected/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-kube-api-access-dhjhm\") pod \"ceilometer-0\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.666907 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.679286 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.711708 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-combined-ca-bundle\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.711805 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-log-httpd\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.711908 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-sg-core-conf-yaml\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.712014 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-config-data\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.712093 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-run-httpd\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.712123 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhjhm\" (UniqueName: \"kubernetes.io/projected/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-kube-api-access-dhjhm\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.712146 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-scripts\") pod \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\" (UID: \"21548fd1-1c5e-4bcb-bd01-1fed3f42e115\") " Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.714420 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.714663 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.720400 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.720492 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-kube-api-access-dhjhm" (OuterVolumeSpecName: "kube-api-access-dhjhm") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "kube-api-access-dhjhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.720604 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-scripts" (OuterVolumeSpecName: "scripts") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.738532 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-config-data" (OuterVolumeSpecName: "config-data") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.738559 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "21548fd1-1c5e-4bcb-bd01-1fed3f42e115" (UID: "21548fd1-1c5e-4bcb-bd01-1fed3f42e115"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814213 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814250 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814261 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814271 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhjhm\" (UniqueName: \"kubernetes.io/projected/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-kube-api-access-dhjhm\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814281 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814309 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:30 crc kubenswrapper[4995]: I0120 16:51:30.814316 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/21548fd1-1c5e-4bcb-bd01-1fed3f42e115-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.652775 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.708409 4995 generic.go:334] "Generic (PLEG): container finished" podID="8ac1d023-57b0-4926-9511-200b094b70f7" containerID="ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc" exitCode=0 Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.708467 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8ac1d023-57b0-4926-9511-200b094b70f7","Type":"ContainerDied","Data":"ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc"} Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.708517 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"8ac1d023-57b0-4926-9511-200b094b70f7","Type":"ContainerDied","Data":"98ad81168bda289883ff86f8e01a947191004e00f718d674838d35ecf0af3134"} Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.708530 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.708537 4995 scope.go:117] "RemoveContainer" containerID="ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.708484 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.751164 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-config-data\") pod \"8ac1d023-57b0-4926-9511-200b094b70f7\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.751287 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-combined-ca-bundle\") pod \"8ac1d023-57b0-4926-9511-200b094b70f7\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.751340 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-custom-prometheus-ca\") pod \"8ac1d023-57b0-4926-9511-200b094b70f7\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.751391 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5dz4\" (UniqueName: \"kubernetes.io/projected/8ac1d023-57b0-4926-9511-200b094b70f7-kube-api-access-h5dz4\") pod \"8ac1d023-57b0-4926-9511-200b094b70f7\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.751513 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ac1d023-57b0-4926-9511-200b094b70f7-logs\") pod \"8ac1d023-57b0-4926-9511-200b094b70f7\" (UID: \"8ac1d023-57b0-4926-9511-200b094b70f7\") " Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.756917 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ac1d023-57b0-4926-9511-200b094b70f7-logs" (OuterVolumeSpecName: "logs") pod "8ac1d023-57b0-4926-9511-200b094b70f7" (UID: "8ac1d023-57b0-4926-9511-200b094b70f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.763211 4995 scope.go:117] "RemoveContainer" containerID="ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.770227 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ac1d023-57b0-4926-9511-200b094b70f7-kube-api-access-h5dz4" (OuterVolumeSpecName: "kube-api-access-h5dz4") pod "8ac1d023-57b0-4926-9511-200b094b70f7" (UID: "8ac1d023-57b0-4926-9511-200b094b70f7"). InnerVolumeSpecName "kube-api-access-h5dz4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:31 crc kubenswrapper[4995]: E0120 16:51:31.776892 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc\": container with ID starting with ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc not found: ID does not exist" containerID="ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.777118 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc"} err="failed to get container status \"ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc\": rpc error: code = NotFound desc = could not find container \"ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc\": container with ID starting with ea6653a22cb439fdc9efbd6b082f61373ca00aa8e67e806fd61c6b51c5b61cbc not found: ID does not exist" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.827805 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ac1d023-57b0-4926-9511-200b094b70f7" (UID: "8ac1d023-57b0-4926-9511-200b094b70f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.831438 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-config-data" (OuterVolumeSpecName: "config-data") pod "8ac1d023-57b0-4926-9511-200b094b70f7" (UID: "8ac1d023-57b0-4926-9511-200b094b70f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.841414 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.853280 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.858942 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ac1d023-57b0-4926-9511-200b094b70f7-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.859113 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.859161 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.859187 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5dz4\" (UniqueName: \"kubernetes.io/projected/8ac1d023-57b0-4926-9511-200b094b70f7-kube-api-access-h5dz4\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.865150 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:31 crc kubenswrapper[4995]: E0120 16:51:31.865654 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac1d023-57b0-4926-9511-200b094b70f7" containerName="watcher-decision-engine" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.865721 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ac1d023-57b0-4926-9511-200b094b70f7" containerName="watcher-decision-engine" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.865970 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ac1d023-57b0-4926-9511-200b094b70f7" containerName="watcher-decision-engine" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.867593 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.868828 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "8ac1d023-57b0-4926-9511-200b094b70f7" (UID: "8ac1d023-57b0-4926-9511-200b094b70f7"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.871019 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.871094 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.878063 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960593 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960674 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-config-data\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960719 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-log-httpd\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960744 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fncs\" (UniqueName: \"kubernetes.io/projected/0a7b2ac3-5faf-48a7-b5fe-65db22000391-kube-api-access-8fncs\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960761 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-run-httpd\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960776 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-scripts\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960816 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:31 crc kubenswrapper[4995]: I0120 16:51:31.960882 4995 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/8ac1d023-57b0-4926-9511-200b094b70f7-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.000741 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="1f72d75c-837a-45db-9879-b2fcda5ff029" path="/var/lib/kubelet/pods/1f72d75c-837a-45db-9879-b2fcda5ff029/volumes" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.001673 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21548fd1-1c5e-4bcb-bd01-1fed3f42e115" path="/var/lib/kubelet/pods/21548fd1-1c5e-4bcb-bd01-1fed3f42e115/volumes" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.048287 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062293 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-config-data\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062393 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-log-httpd\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062418 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fncs\" (UniqueName: \"kubernetes.io/projected/0a7b2ac3-5faf-48a7-b5fe-65db22000391-kube-api-access-8fncs\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062436 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-run-httpd\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062453 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-scripts\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.062473 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.064151 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-log-httpd\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.064812 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-run-httpd\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.066585 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-scripts\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.067532 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.067590 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.067713 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.067899 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-config-data\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.080265 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.081738 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.083790 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.084222 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fncs\" (UniqueName: \"kubernetes.io/projected/0a7b2ac3-5faf-48a7-b5fe-65db22000391-kube-api-access-8fncs\") pod \"ceilometer-0\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.101485 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.164558 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.164611 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhc6q\" (UniqueName: \"kubernetes.io/projected/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-kube-api-access-dhc6q\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.164647 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.164667 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-logs\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.164954 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-config-data\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.193144 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.266676 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-config-data\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.267107 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.267152 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhc6q\" (UniqueName: \"kubernetes.io/projected/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-kube-api-access-dhc6q\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.267194 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.267224 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-logs\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.267699 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-logs\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.271792 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.271878 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-config-data\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.272535 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.285835 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dhc6q\" (UniqueName: \"kubernetes.io/projected/f3461cc0-9ae2-4e3b-a0ba-070e6273cba0-kube-api-access-dhc6q\") pod \"watcher-decision-engine-0\" (UID: \"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0\") " pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.401437 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.463792 4995 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod701cf418-d6f5-4326-b237-2fd120de4bd3"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod701cf418-d6f5-4326-b237-2fd120de4bd3] : Timed out while waiting for systemd to remove kubepods-besteffort-pod701cf418_d6f5_4326_b237_2fd120de4bd3.slice" Jan 20 16:51:32 crc kubenswrapper[4995]: E0120 16:51:32.463850 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod701cf418-d6f5-4326-b237-2fd120de4bd3] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod701cf418-d6f5-4326-b237-2fd120de4bd3] : Timed out while waiting for systemd to remove kubepods-besteffort-pod701cf418_d6f5_4326_b237_2fd120de4bd3.slice" pod="openstack/horizon-6db4b5b7df-cv7h4" podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.468650 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:32 crc kubenswrapper[4995]: W0120 16:51:32.480238 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a7b2ac3_5faf_48a7_b5fe_65db22000391.slice/crio-6e2a7196c10e608503280ed4c3cd1fe95a9b8c1db45885b5dffe8ce8c2da81a0 WatchSource:0}: Error finding container 6e2a7196c10e608503280ed4c3cd1fe95a9b8c1db45885b5dffe8ce8c2da81a0: Status 404 returned error can't find the container with id 6e2a7196c10e608503280ed4c3cd1fe95a9b8c1db45885b5dffe8ce8c2da81a0 Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.753313 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6db4b5b7df-cv7h4" Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.753391 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerStarted","Data":"6e2a7196c10e608503280ed4c3cd1fe95a9b8c1db45885b5dffe8ce8c2da81a0"} Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.792115 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6db4b5b7df-cv7h4"] Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.799257 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6db4b5b7df-cv7h4"] Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.890909 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 20 16:51:32 crc kubenswrapper[4995]: W0120 16:51:32.909492 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3461cc0_9ae2_4e3b_a0ba_070e6273cba0.slice/crio-b57bef8f9b67648a5a949cfcf9c852a5477aaeca4078a538dcd82dddc61e5bc5 WatchSource:0}: Error finding container b57bef8f9b67648a5a949cfcf9c852a5477aaeca4078a538dcd82dddc61e5bc5: Status 404 returned error can't find the container with id b57bef8f9b67648a5a949cfcf9c852a5477aaeca4078a538dcd82dddc61e5bc5 Jan 20 16:51:32 crc kubenswrapper[4995]: I0120 16:51:32.914964 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:33 crc kubenswrapper[4995]: I0120 16:51:33.115020 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-84df7dbffb-njbnq" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Jan 20 16:51:33 crc kubenswrapper[4995]: I0120 16:51:33.768027 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0","Type":"ContainerStarted","Data":"354c1150a5e3f4359c2f6fe52197f322598b415e3f7a5a0100f1f5a423a5af93"} Jan 20 16:51:33 crc kubenswrapper[4995]: I0120 16:51:33.768088 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"f3461cc0-9ae2-4e3b-a0ba-070e6273cba0","Type":"ContainerStarted","Data":"b57bef8f9b67648a5a949cfcf9c852a5477aaeca4078a538dcd82dddc61e5bc5"} Jan 20 16:51:33 crc kubenswrapper[4995]: I0120 16:51:33.770131 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerStarted","Data":"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148"} Jan 20 16:51:33 crc kubenswrapper[4995]: I0120 16:51:33.788470 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=1.788450053 podStartE2EDuration="1.788450053s" podCreationTimestamp="2026-01-20 16:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:51:33.785028909 +0000 UTC m=+1212.029633715" watchObservedRunningTime="2026-01-20 16:51:33.788450053 +0000 UTC m=+1212.033054859" Jan 20 16:51:34 crc kubenswrapper[4995]: I0120 16:51:34.008615 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="701cf418-d6f5-4326-b237-2fd120de4bd3" path="/var/lib/kubelet/pods/701cf418-d6f5-4326-b237-2fd120de4bd3/volumes" Jan 20 16:51:34 crc kubenswrapper[4995]: I0120 16:51:34.009288 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ac1d023-57b0-4926-9511-200b094b70f7" path="/var/lib/kubelet/pods/8ac1d023-57b0-4926-9511-200b094b70f7/volumes" Jan 20 16:51:37 crc kubenswrapper[4995]: I0120 16:51:37.811594 4995 generic.go:334] "Generic (PLEG): container finished" podID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerID="d5e83fb3d6635ff7c522d7f7fcf25ac6e8c713eb35fed7645bab1f6b93ff4b23" exitCode=137 Jan 20 16:51:37 crc kubenswrapper[4995]: I0120 16:51:37.811789 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84df7dbffb-njbnq" event={"ID":"8e877da9-408f-40dd-8e4a-5173ba3d6988","Type":"ContainerDied","Data":"d5e83fb3d6635ff7c522d7f7fcf25ac6e8c713eb35fed7645bab1f6b93ff4b23"} Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.388838 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.556782 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e877da9-408f-40dd-8e4a-5173ba3d6988-logs\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.556840 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-secret-key\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.556906 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-config-data\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.556936 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-tls-certs\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.556989 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzxhd\" (UniqueName: \"kubernetes.io/projected/8e877da9-408f-40dd-8e4a-5173ba3d6988-kube-api-access-gzxhd\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.557050 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-scripts\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: \"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.557139 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-combined-ca-bundle\") pod \"8e877da9-408f-40dd-8e4a-5173ba3d6988\" (UID: 
\"8e877da9-408f-40dd-8e4a-5173ba3d6988\") " Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.557424 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e877da9-408f-40dd-8e4a-5173ba3d6988-logs" (OuterVolumeSpecName: "logs") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.563824 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e877da9-408f-40dd-8e4a-5173ba3d6988-kube-api-access-gzxhd" (OuterVolumeSpecName: "kube-api-access-gzxhd") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "kube-api-access-gzxhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.563927 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.585139 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-config-data" (OuterVolumeSpecName: "config-data") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.589331 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-scripts" (OuterVolumeSpecName: "scripts") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.601676 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.627265 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "8e877da9-408f-40dd-8e4a-5173ba3d6988" (UID: "8e877da9-408f-40dd-8e4a-5173ba3d6988"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659543 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8e877da9-408f-40dd-8e4a-5173ba3d6988-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659589 4995 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659606 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659620 4995 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659633 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzxhd\" (UniqueName: \"kubernetes.io/projected/8e877da9-408f-40dd-8e4a-5173ba3d6988-kube-api-access-gzxhd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659646 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8e877da9-408f-40dd-8e4a-5173ba3d6988-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.659657 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e877da9-408f-40dd-8e4a-5173ba3d6988-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.821057 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" event={"ID":"6f580cd7-fea0-4fb0-a858-74e2deb15c87","Type":"ContainerStarted","Data":"611c1eed9b3a0628f4c97d98931a8c066cfb894620af90551690dc6e0e45d957"} Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.822954 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerStarted","Data":"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e"} Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.825118 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84df7dbffb-njbnq" event={"ID":"8e877da9-408f-40dd-8e4a-5173ba3d6988","Type":"ContainerDied","Data":"f5762486221393b34270e1f452370aae2241bb2d3f32181c7c3ff8712e8cb4ea"} Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.825148 4995 scope.go:117] "RemoveContainer" containerID="b8e948adfe77d2cd9ea089ac1055ca05cf957452ed332421df4521eb39eda287" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.825260 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84df7dbffb-njbnq" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.839800 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" podStartSLOduration=3.255031675 podStartE2EDuration="11.839782412s" podCreationTimestamp="2026-01-20 16:51:27 +0000 UTC" firstStartedPulling="2026-01-20 16:51:29.604783055 +0000 UTC m=+1207.849387851" lastFinishedPulling="2026-01-20 16:51:38.189533792 +0000 UTC m=+1216.434138588" observedRunningTime="2026-01-20 16:51:38.836314158 +0000 UTC m=+1217.080918964" watchObservedRunningTime="2026-01-20 16:51:38.839782412 +0000 UTC m=+1217.084387218" Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.871718 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84df7dbffb-njbnq"] Jan 20 16:51:38 crc kubenswrapper[4995]: I0120 16:51:38.884745 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-84df7dbffb-njbnq"] Jan 20 16:51:39 crc kubenswrapper[4995]: I0120 16:51:39.010219 4995 scope.go:117] "RemoveContainer" containerID="d5e83fb3d6635ff7c522d7f7fcf25ac6e8c713eb35fed7645bab1f6b93ff4b23" Jan 20 16:51:40 crc kubenswrapper[4995]: I0120 16:51:40.000807 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" path="/var/lib/kubelet/pods/8e877da9-408f-40dd-8e4a-5173ba3d6988/volumes" Jan 20 16:51:40 crc kubenswrapper[4995]: I0120 16:51:40.850912 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerStarted","Data":"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3"} Jan 20 16:51:42 crc kubenswrapper[4995]: I0120 16:51:42.402550 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:42 crc kubenswrapper[4995]: I0120 16:51:42.435580 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:42 crc kubenswrapper[4995]: I0120 16:51:42.868279 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:42 crc kubenswrapper[4995]: I0120 16:51:42.898433 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 20 16:51:43 crc kubenswrapper[4995]: I0120 16:51:43.879759 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerStarted","Data":"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb"} Jan 20 16:51:43 crc kubenswrapper[4995]: I0120 16:51:43.879962 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="proxy-httpd" containerID="cri-o://38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" gracePeriod=30 Jan 20 16:51:43 crc kubenswrapper[4995]: I0120 16:51:43.879946 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-central-agent" containerID="cri-o://9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" gracePeriod=30 Jan 20 16:51:43 crc kubenswrapper[4995]: I0120 16:51:43.880005 4995 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-notification-agent" containerID="cri-o://31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" gracePeriod=30 Jan 20 16:51:43 crc kubenswrapper[4995]: I0120 16:51:43.880003 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="sg-core" containerID="cri-o://c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" gracePeriod=30 Jan 20 16:51:43 crc kubenswrapper[4995]: I0120 16:51:43.931220 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.141087471 podStartE2EDuration="12.931200837s" podCreationTimestamp="2026-01-20 16:51:31 +0000 UTC" firstStartedPulling="2026-01-20 16:51:32.482992778 +0000 UTC m=+1210.727597584" lastFinishedPulling="2026-01-20 16:51:43.273106134 +0000 UTC m=+1221.517710950" observedRunningTime="2026-01-20 16:51:43.914416852 +0000 UTC m=+1222.159021658" watchObservedRunningTime="2026-01-20 16:51:43.931200837 +0000 UTC m=+1222.175805643" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.876328 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.893205 4995 generic.go:334] "Generic (PLEG): container finished" podID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" exitCode=0 Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.893234 4995 generic.go:334] "Generic (PLEG): container finished" podID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" exitCode=2 Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.893242 4995 generic.go:334] "Generic (PLEG): container finished" podID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" exitCode=0 Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.893249 4995 generic.go:334] "Generic (PLEG): container finished" podID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" exitCode=0 Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.893259 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.894295 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerDied","Data":"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb"} Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.894403 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerDied","Data":"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3"} Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.894498 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerDied","Data":"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e"} Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.894431 4995 scope.go:117] "RemoveContainer" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.894568 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerDied","Data":"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148"} Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.894703 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a7b2ac3-5faf-48a7-b5fe-65db22000391","Type":"ContainerDied","Data":"6e2a7196c10e608503280ed4c3cd1fe95a9b8c1db45885b5dffe8ce8c2da81a0"} Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.932231 4995 scope.go:117] "RemoveContainer" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.955305 4995 scope.go:117] "RemoveContainer" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.974839 4995 scope.go:117] "RemoveContainer" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998237 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-run-httpd\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998287 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-config-data\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998344 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fncs\" (UniqueName: \"kubernetes.io/projected/0a7b2ac3-5faf-48a7-b5fe-65db22000391-kube-api-access-8fncs\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998381 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-sg-core-conf-yaml\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998425 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-scripts\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998465 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-log-httpd\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.998509 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-combined-ca-bundle\") pod \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\" (UID: \"0a7b2ac3-5faf-48a7-b5fe-65db22000391\") " Jan 20 16:51:44 crc kubenswrapper[4995]: I0120 16:51:44.999631 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.000188 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.005486 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-scripts" (OuterVolumeSpecName: "scripts") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.010286 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a7b2ac3-5faf-48a7-b5fe-65db22000391-kube-api-access-8fncs" (OuterVolumeSpecName: "kube-api-access-8fncs") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "kube-api-access-8fncs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.011457 4995 scope.go:117] "RemoveContainer" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.011859 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": container with ID starting with 38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb not found: ID does not exist" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.011903 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb"} err="failed to get container status \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": rpc error: code = NotFound desc = could not find container \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": container with ID starting with 38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.011931 4995 scope.go:117] "RemoveContainer" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.012334 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": container with ID starting with c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3 not found: ID does not exist" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.012361 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3"} err="failed to get container status \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": rpc error: code = NotFound desc = could not find container \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": container with ID starting with c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.012375 4995 scope.go:117] "RemoveContainer" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.012598 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": container with ID starting with 31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e not found: ID does not exist" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.012639 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e"} err="failed to get container status \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": rpc error: code = NotFound desc = could not 
find container \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": container with ID starting with 31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.012684 4995 scope.go:117] "RemoveContainer" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.012948 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": container with ID starting with 9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148 not found: ID does not exist" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.012980 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148"} err="failed to get container status \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": rpc error: code = NotFound desc = could not find container \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": container with ID starting with 9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.012994 4995 scope.go:117] "RemoveContainer" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.014458 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb"} err="failed to get container status \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": rpc error: code = NotFound desc = could not find container \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": container with ID starting with 38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.014482 4995 scope.go:117] "RemoveContainer" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.014708 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3"} err="failed to get container status \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": rpc error: code = NotFound desc = could not find container \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": container with ID starting with c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.014727 4995 scope.go:117] "RemoveContainer" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.014901 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e"} err="failed to get container status \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": rpc error: code = NotFound desc = could not 
find container \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": container with ID starting with 31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.014931 4995 scope.go:117] "RemoveContainer" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.015201 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148"} err="failed to get container status \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": rpc error: code = NotFound desc = could not find container \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": container with ID starting with 9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.015228 4995 scope.go:117] "RemoveContainer" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.015601 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb"} err="failed to get container status \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": rpc error: code = NotFound desc = could not find container \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": container with ID starting with 38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.015695 4995 scope.go:117] "RemoveContainer" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016063 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3"} err="failed to get container status \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": rpc error: code = NotFound desc = could not find container \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": container with ID starting with c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016123 4995 scope.go:117] "RemoveContainer" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016336 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e"} err="failed to get container status \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": rpc error: code = NotFound desc = could not find container \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": container with ID starting with 31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016358 4995 scope.go:117] "RemoveContainer" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016565 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148"} err="failed to get container status \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": rpc error: code = NotFound desc = could not find container \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": container with ID starting with 9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016592 4995 scope.go:117] "RemoveContainer" containerID="38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016796 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb"} err="failed to get container status \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": rpc error: code = NotFound desc = could not find container \"38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb\": container with ID starting with 38055d58191246f234cc50d189298d634c870ff7c5ae3299eefa73e595bb38eb not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.016986 4995 scope.go:117] "RemoveContainer" containerID="c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.017309 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3"} err="failed to get container status \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": rpc error: code = NotFound desc = could not find container \"c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3\": container with ID starting with c9f4cc2e0cdadf3a08442a2086d5bf5ca9609cd24f0ae386fc21e657308465c3 not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.017339 4995 scope.go:117] "RemoveContainer" containerID="31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.017524 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e"} err="failed to get container status \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": rpc error: code = NotFound desc = could not find container \"31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e\": container with ID starting with 31bf9c8785b8bfe938d34fb4b7359a8b29205de1085b7edf5425a3044c71dc0e not found: ID does not exist" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.017542 4995 scope.go:117] "RemoveContainer" containerID="9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.017710 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148"} err="failed to get container status \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": rpc error: code = NotFound desc = could not find container \"9d7aa9ebd9487af8dd624cae9d2eed16e530194a4502ac16f21d4982b59dc148\": container with ID starting with 
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.033651 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.100736 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.100765 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fncs\" (UniqueName: \"kubernetes.io/projected/0a7b2ac3-5faf-48a7-b5fe-65db22000391-kube-api-access-8fncs\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.100776 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.100788 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.100799 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a7b2ac3-5faf-48a7-b5fe-65db22000391-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.108485 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.116554 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-config-data" (OuterVolumeSpecName: "config-data") pod "0a7b2ac3-5faf-48a7-b5fe-65db22000391" (UID: "0a7b2ac3-5faf-48a7-b5fe-65db22000391"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.203054 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.203106 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7b2ac3-5faf-48a7-b5fe-65db22000391-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.224641 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.234230 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.248321 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.248976 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="sg-core"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249000 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="sg-core"
Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.249011 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-notification-agent"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249020 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-notification-agent"
Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.249038 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249043 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon"
Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.249055 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-central-agent"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249061 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-central-agent"
Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.249071 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="proxy-httpd"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249080 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="proxy-httpd"
Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.249107 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon-log"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249114 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon-log"
Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249279 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-notification-agent"
podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-notification-agent" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249295 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="proxy-httpd" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249313 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon-log" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249320 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e877da9-408f-40dd-8e4a-5173ba3d6988" containerName="horizon" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249330 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="sg-core" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.249339 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" containerName="ceilometer-central-agent" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.250941 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.253282 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.253300 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.263185 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.378394 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:45 crc kubenswrapper[4995]: E0120 16:51:45.379084 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-jjq8p log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="5603d9c6-7747-4f58-93c5-bca421d44998" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.408837 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-run-httpd\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.408896 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.408926 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-config-data\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.409674 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-jjq8p\" (UniqueName: \"kubernetes.io/projected/5603d9c6-7747-4f58-93c5-bca421d44998-kube-api-access-jjq8p\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.409712 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-log-httpd\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.409850 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-scripts\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.410038 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.511566 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-scripts\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.511671 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.511739 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-run-httpd\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.511765 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.512551 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-run-httpd\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.512637 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-config-data\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.513253 4995 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-jjq8p\" (UniqueName: \"kubernetes.io/projected/5603d9c6-7747-4f58-93c5-bca421d44998-kube-api-access-jjq8p\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.513299 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-log-httpd\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.513892 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-log-httpd\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.517022 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-scripts\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.520861 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.532390 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjq8p\" (UniqueName: \"kubernetes.io/projected/5603d9c6-7747-4f58-93c5-bca421d44998-kube-api-access-jjq8p\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.532399 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.536021 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-config-data\") pod \"ceilometer-0\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.904825 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:45 crc kubenswrapper[4995]: I0120 16:51:45.918208 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.006575 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a7b2ac3-5faf-48a7-b5fe-65db22000391" path="/var/lib/kubelet/pods/0a7b2ac3-5faf-48a7-b5fe-65db22000391/volumes" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023190 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-run-httpd\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023263 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-config-data\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023312 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-scripts\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023411 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-log-httpd\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023438 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-sg-core-conf-yaml\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023518 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjq8p\" (UniqueName: \"kubernetes.io/projected/5603d9c6-7747-4f58-93c5-bca421d44998-kube-api-access-jjq8p\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023618 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-combined-ca-bundle\") pod \"5603d9c6-7747-4f58-93c5-bca421d44998\" (UID: \"5603d9c6-7747-4f58-93c5-bca421d44998\") " Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.023967 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.024707 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.025102 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.030576 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-config-data" (OuterVolumeSpecName: "config-data") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.031189 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-scripts" (OuterVolumeSpecName: "scripts") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.033194 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.034229 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5603d9c6-7747-4f58-93c5-bca421d44998-kube-api-access-jjq8p" (OuterVolumeSpecName: "kube-api-access-jjq8p") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "kube-api-access-jjq8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.040291 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5603d9c6-7747-4f58-93c5-bca421d44998" (UID: "5603d9c6-7747-4f58-93c5-bca421d44998"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.126832 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.126866 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.126875 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5603d9c6-7747-4f58-93c5-bca421d44998-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.126884 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.126893 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjq8p\" (UniqueName: \"kubernetes.io/projected/5603d9c6-7747-4f58-93c5-bca421d44998-kube-api-access-jjq8p\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.126903 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5603d9c6-7747-4f58-93c5-bca421d44998-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:46 crc kubenswrapper[4995]: I0120 16:51:46.913620 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.031250 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.046248 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.057160 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.063449 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.066579 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.066699 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.077439 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.144467 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-log-httpd\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.144510 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spw4j\" (UniqueName: \"kubernetes.io/projected/b9a4163d-22d8-416c-a98a-2da6573a2646-kube-api-access-spw4j\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.144559 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.144729 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-config-data\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.144746 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-scripts\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.145056 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-run-httpd\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.145080 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247134 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-run-httpd\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247180 4995 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247259 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-log-httpd\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247276 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spw4j\" (UniqueName: \"kubernetes.io/projected/b9a4163d-22d8-416c-a98a-2da6573a2646-kube-api-access-spw4j\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247317 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247358 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-config-data\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247374 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-scripts\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247886 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-log-httpd\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.247989 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-run-httpd\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.263833 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.282066 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-config-data\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.287972 4995 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-spw4j\" (UniqueName: \"kubernetes.io/projected/b9a4163d-22d8-416c-a98a-2da6573a2646-kube-api-access-spw4j\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.291783 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-scripts\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.297520 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.393095 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.856035 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:47 crc kubenswrapper[4995]: W0120 16:51:47.887640 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9a4163d_22d8_416c_a98a_2da6573a2646.slice/crio-80f841da8b7145e0bbd78c671a48f2a5845cddc75ff1c5f8d1a3727d71dc8bab WatchSource:0}: Error finding container 80f841da8b7145e0bbd78c671a48f2a5845cddc75ff1c5f8d1a3727d71dc8bab: Status 404 returned error can't find the container with id 80f841da8b7145e0bbd78c671a48f2a5845cddc75ff1c5f8d1a3727d71dc8bab Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.894344 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:47 crc kubenswrapper[4995]: I0120 16:51:47.923565 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerStarted","Data":"80f841da8b7145e0bbd78c671a48f2a5845cddc75ff1c5f8d1a3727d71dc8bab"} Jan 20 16:51:48 crc kubenswrapper[4995]: I0120 16:51:48.001215 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5603d9c6-7747-4f58-93c5-bca421d44998" path="/var/lib/kubelet/pods/5603d9c6-7747-4f58-93c5-bca421d44998/volumes" Jan 20 16:51:49 crc kubenswrapper[4995]: I0120 16:51:49.941285 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerStarted","Data":"f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21"} Jan 20 16:51:52 crc kubenswrapper[4995]: I0120 16:51:52.969915 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerStarted","Data":"f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e"} Jan 20 16:51:53 crc kubenswrapper[4995]: I0120 16:51:53.980910 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerStarted","Data":"b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c"} Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.009162 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerStarted","Data":"ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d"} Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.010453 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.009362 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="proxy-httpd" containerID="cri-o://ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d" gracePeriod=30 Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.009409 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="sg-core" containerID="cri-o://b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c" gracePeriod=30 Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.009455 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-notification-agent" containerID="cri-o://f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e" gracePeriod=30 Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.009310 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-central-agent" containerID="cri-o://f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21" gracePeriod=30 Jan 20 16:51:56 crc kubenswrapper[4995]: I0120 16:51:56.047232 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.855620696 podStartE2EDuration="9.047208911s" podCreationTimestamp="2026-01-20 16:51:47 +0000 UTC" firstStartedPulling="2026-01-20 16:51:47.889856186 +0000 UTC m=+1226.134460992" lastFinishedPulling="2026-01-20 16:51:55.081444401 +0000 UTC m=+1233.326049207" observedRunningTime="2026-01-20 16:51:56.034427166 +0000 UTC m=+1234.279031972" watchObservedRunningTime="2026-01-20 16:51:56.047208911 +0000 UTC m=+1234.291813717" Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.023251 4995 generic.go:334] "Generic (PLEG): container finished" podID="6f580cd7-fea0-4fb0-a858-74e2deb15c87" containerID="611c1eed9b3a0628f4c97d98931a8c066cfb894620af90551690dc6e0e45d957" exitCode=0 Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.023560 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" event={"ID":"6f580cd7-fea0-4fb0-a858-74e2deb15c87","Type":"ContainerDied","Data":"611c1eed9b3a0628f4c97d98931a8c066cfb894620af90551690dc6e0e45d957"} Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.030666 4995 generic.go:334] "Generic (PLEG): container finished" podID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerID="ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d" exitCode=0 Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.030706 4995 generic.go:334] "Generic (PLEG): container finished" podID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerID="b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c" exitCode=2 Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.030715 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerID="f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e" exitCode=0 Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.030760 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerDied","Data":"ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d"} Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.030842 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerDied","Data":"b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c"} Jan 20 16:51:57 crc kubenswrapper[4995]: I0120 16:51:57.030863 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerDied","Data":"f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e"} Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.503732 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.666251 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-config-data\") pod \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.666495 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-combined-ca-bundle\") pod \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.666571 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-scripts\") pod \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.666605 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7s2h9\" (UniqueName: \"kubernetes.io/projected/6f580cd7-fea0-4fb0-a858-74e2deb15c87-kube-api-access-7s2h9\") pod \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\" (UID: \"6f580cd7-fea0-4fb0-a858-74e2deb15c87\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.672186 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-scripts" (OuterVolumeSpecName: "scripts") pod "6f580cd7-fea0-4fb0-a858-74e2deb15c87" (UID: "6f580cd7-fea0-4fb0-a858-74e2deb15c87"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.684106 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f580cd7-fea0-4fb0-a858-74e2deb15c87-kube-api-access-7s2h9" (OuterVolumeSpecName: "kube-api-access-7s2h9") pod "6f580cd7-fea0-4fb0-a858-74e2deb15c87" (UID: "6f580cd7-fea0-4fb0-a858-74e2deb15c87"). InnerVolumeSpecName "kube-api-access-7s2h9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.698373 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-config-data" (OuterVolumeSpecName: "config-data") pod "6f580cd7-fea0-4fb0-a858-74e2deb15c87" (UID: "6f580cd7-fea0-4fb0-a858-74e2deb15c87"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.700306 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f580cd7-fea0-4fb0-a858-74e2deb15c87" (UID: "6f580cd7-fea0-4fb0-a858-74e2deb15c87"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.769851 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.769902 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.769915 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f580cd7-fea0-4fb0-a858-74e2deb15c87-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.769925 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7s2h9\" (UniqueName: \"kubernetes.io/projected/6f580cd7-fea0-4fb0-a858-74e2deb15c87-kube-api-access-7s2h9\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.772619 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.871598 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-combined-ca-bundle\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.871825 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-run-httpd\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.871889 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-sg-core-conf-yaml\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.872028 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-config-data\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.872173 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-log-httpd\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.872257 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spw4j\" (UniqueName: \"kubernetes.io/projected/b9a4163d-22d8-416c-a98a-2da6573a2646-kube-api-access-spw4j\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.872295 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-scripts\") pod \"b9a4163d-22d8-416c-a98a-2da6573a2646\" (UID: \"b9a4163d-22d8-416c-a98a-2da6573a2646\") " Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.873000 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.873159 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.874408 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.876245 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9a4163d-22d8-416c-a98a-2da6573a2646-kube-api-access-spw4j" (OuterVolumeSpecName: "kube-api-access-spw4j") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). InnerVolumeSpecName "kube-api-access-spw4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.876663 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-scripts" (OuterVolumeSpecName: "scripts") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.912107 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.949583 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.974833 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spw4j\" (UniqueName: \"kubernetes.io/projected/b9a4163d-22d8-416c-a98a-2da6573a2646-kube-api-access-spw4j\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.974860 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.974869 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.974878 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9a4163d-22d8-416c-a98a-2da6573a2646-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.974886 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:58 crc kubenswrapper[4995]: I0120 16:51:58.976685 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-config-data" (OuterVolumeSpecName: "config-data") pod "b9a4163d-22d8-416c-a98a-2da6573a2646" (UID: "b9a4163d-22d8-416c-a98a-2da6573a2646"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.053105 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.053479 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-5c6sb" event={"ID":"6f580cd7-fea0-4fb0-a858-74e2deb15c87","Type":"ContainerDied","Data":"179a552976edea38e064f45b8a16c48b1f2f79e8cab6d8a1ab000c50876b6c73"} Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.053530 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="179a552976edea38e064f45b8a16c48b1f2f79e8cab6d8a1ab000c50876b6c73" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.056157 4995 generic.go:334] "Generic (PLEG): container finished" podID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerID="f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21" exitCode=0 Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.056253 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.056264 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerDied","Data":"f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21"} Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.056334 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b9a4163d-22d8-416c-a98a-2da6573a2646","Type":"ContainerDied","Data":"80f841da8b7145e0bbd78c671a48f2a5845cddc75ff1c5f8d1a3727d71dc8bab"} Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.056371 4995 scope.go:117] "RemoveContainer" containerID="ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.076423 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9a4163d-22d8-416c-a98a-2da6573a2646-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.086304 4995 scope.go:117] "RemoveContainer" containerID="b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.105498 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.121627 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.122007 4995 scope.go:117] "RemoveContainer" containerID="f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.138976 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.139463 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="sg-core" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139489 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="sg-core" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.139520 4995 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="6f580cd7-fea0-4fb0-a858-74e2deb15c87" containerName="nova-cell0-conductor-db-sync" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139530 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f580cd7-fea0-4fb0-a858-74e2deb15c87" containerName="nova-cell0-conductor-db-sync" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.139546 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-central-agent" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139555 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-central-agent" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.139574 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-notification-agent" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139583 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-notification-agent" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.139601 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="proxy-httpd" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139607 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="proxy-httpd" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139848 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="sg-core" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139873 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f580cd7-fea0-4fb0-a858-74e2deb15c87" containerName="nova-cell0-conductor-db-sync" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139887 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-notification-agent" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139897 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="ceilometer-central-agent" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.139915 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" containerName="proxy-httpd" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.142030 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.158337 4995 scope.go:117] "RemoveContainer" containerID="f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.158850 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.159201 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.181364 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.193434 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.194968 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.198597 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.201975 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-s86mn" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.207776 4995 scope.go:117] "RemoveContainer" containerID="ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.208221 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d\": container with ID starting with ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d not found: ID does not exist" containerID="ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.208260 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d"} err="failed to get container status \"ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d\": rpc error: code = NotFound desc = could not find container \"ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d\": container with ID starting with ce1a521c32a3c23aaf829d43a5a8fd73092c6149d16ad69276af68d1bcb19a9d not found: ID does not exist" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.208294 4995 scope.go:117] "RemoveContainer" containerID="b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.208303 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.208583 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c\": container with ID starting with b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c not found: ID does not exist" containerID="b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.208619 4995 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c"} err="failed to get container status \"b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c\": rpc error: code = NotFound desc = could not find container \"b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c\": container with ID starting with b39e776d043a594049d6127d09eba34a99ddd1ccabee9e5fcdd4a78a7472233c not found: ID does not exist" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.208649 4995 scope.go:117] "RemoveContainer" containerID="f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.209809 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e\": container with ID starting with f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e not found: ID does not exist" containerID="f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.209842 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e"} err="failed to get container status \"f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e\": rpc error: code = NotFound desc = could not find container \"f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e\": container with ID starting with f348cb74a2ef91a6c1ebb8b4b65614be2012652e1115abae231d181f41d9e01e not found: ID does not exist" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.209890 4995 scope.go:117] "RemoveContainer" containerID="f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21" Jan 20 16:51:59 crc kubenswrapper[4995]: E0120 16:51:59.210268 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21\": container with ID starting with f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21 not found: ID does not exist" containerID="f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.210295 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21"} err="failed to get container status \"f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21\": rpc error: code = NotFound desc = could not find container \"f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21\": container with ID starting with f2ec7a888f31c2e569570f981c19710a2e8d31438b453f5bea573c0a62052d21 not found: ID does not exist" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.279553 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rq5g\" (UniqueName: \"kubernetes.io/projected/3d353583-476a-415d-ad4f-21c4e7db21f4-kube-api-access-9rq5g\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.279598 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-765wp\" 
(UniqueName: \"kubernetes.io/projected/19a0bd62-8fb6-4078-af42-ac2f1e919071-kube-api-access-765wp\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.279625 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.279667 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.279812 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.279920 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-scripts\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.280101 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-config-data\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.280139 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-run-httpd\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.280256 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-log-httpd\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.280362 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.382191 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " 
pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.382517 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-scripts\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.382641 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-config-data\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.382745 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-run-httpd\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.382920 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-log-httpd\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.383050 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.383188 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rq5g\" (UniqueName: \"kubernetes.io/projected/3d353583-476a-415d-ad4f-21c4e7db21f4-kube-api-access-9rq5g\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.383301 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-765wp\" (UniqueName: \"kubernetes.io/projected/19a0bd62-8fb6-4078-af42-ac2f1e919071-kube-api-access-765wp\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.383393 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.383506 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.384727 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-log-httpd\") pod \"ceilometer-0\" (UID: 
\"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.384745 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-run-httpd\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.388236 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-scripts\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.388617 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-config-data\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.389160 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.389809 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.398031 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.398784 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.407141 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-765wp\" (UniqueName: \"kubernetes.io/projected/19a0bd62-8fb6-4078-af42-ac2f1e919071-kube-api-access-765wp\") pod \"ceilometer-0\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.420874 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rq5g\" (UniqueName: \"kubernetes.io/projected/3d353583-476a-415d-ad4f-21c4e7db21f4-kube-api-access-9rq5g\") pod \"nova-cell0-conductor-0\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.497280 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.524694 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 20 16:51:59 crc kubenswrapper[4995]: I0120 16:51:59.999618 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9a4163d-22d8-416c-a98a-2da6573a2646" path="/var/lib/kubelet/pods/b9a4163d-22d8-416c-a98a-2da6573a2646/volumes" Jan 20 16:52:00 crc kubenswrapper[4995]: W0120 16:52:00.093327 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19a0bd62_8fb6_4078_af42_ac2f1e919071.slice/crio-1cf07431d6f113c829a1e99b6c7a1e713ba5672de0bb10e16389fd631bdb66a4 WatchSource:0}: Error finding container 1cf07431d6f113c829a1e99b6c7a1e713ba5672de0bb10e16389fd631bdb66a4: Status 404 returned error can't find the container with id 1cf07431d6f113c829a1e99b6c7a1e713ba5672de0bb10e16389fd631bdb66a4 Jan 20 16:52:00 crc kubenswrapper[4995]: I0120 16:52:00.097998 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:00 crc kubenswrapper[4995]: W0120 16:52:00.108106 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d353583_476a_415d_ad4f_21c4e7db21f4.slice/crio-0c193d4dbb143855731ab59cc8f4d83eef326e44eff37c6334c02ccb988f0770 WatchSource:0}: Error finding container 0c193d4dbb143855731ab59cc8f4d83eef326e44eff37c6334c02ccb988f0770: Status 404 returned error can't find the container with id 0c193d4dbb143855731ab59cc8f4d83eef326e44eff37c6334c02ccb988f0770 Jan 20 16:52:00 crc kubenswrapper[4995]: I0120 16:52:00.117579 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:01 crc kubenswrapper[4995]: I0120 16:52:01.076679 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3d353583-476a-415d-ad4f-21c4e7db21f4","Type":"ContainerStarted","Data":"777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445"} Jan 20 16:52:01 crc kubenswrapper[4995]: I0120 16:52:01.077031 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:01 crc kubenswrapper[4995]: I0120 16:52:01.077045 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3d353583-476a-415d-ad4f-21c4e7db21f4","Type":"ContainerStarted","Data":"0c193d4dbb143855731ab59cc8f4d83eef326e44eff37c6334c02ccb988f0770"} Jan 20 16:52:01 crc kubenswrapper[4995]: I0120 16:52:01.081521 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerStarted","Data":"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa"} Jan 20 16:52:01 crc kubenswrapper[4995]: I0120 16:52:01.081581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerStarted","Data":"1cf07431d6f113c829a1e99b6c7a1e713ba5672de0bb10e16389fd631bdb66a4"} Jan 20 16:52:01 crc kubenswrapper[4995]: I0120 16:52:01.110496 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.110467724 podStartE2EDuration="2.110467724s" podCreationTimestamp="2026-01-20 16:51:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:01.096516666 +0000 UTC 
m=+1239.341121472" watchObservedRunningTime="2026-01-20 16:52:01.110467724 +0000 UTC m=+1239.355072550" Jan 20 16:52:02 crc kubenswrapper[4995]: I0120 16:52:02.097312 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerStarted","Data":"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3"} Jan 20 16:52:03 crc kubenswrapper[4995]: I0120 16:52:03.107604 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerStarted","Data":"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e"} Jan 20 16:52:04 crc kubenswrapper[4995]: I0120 16:52:04.130348 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerStarted","Data":"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5"} Jan 20 16:52:04 crc kubenswrapper[4995]: I0120 16:52:04.131685 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:52:04 crc kubenswrapper[4995]: I0120 16:52:04.166643 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.609278533 podStartE2EDuration="5.166618399s" podCreationTimestamp="2026-01-20 16:51:59 +0000 UTC" firstStartedPulling="2026-01-20 16:52:00.095721286 +0000 UTC m=+1238.340326092" lastFinishedPulling="2026-01-20 16:52:03.653061152 +0000 UTC m=+1241.897665958" observedRunningTime="2026-01-20 16:52:04.153619357 +0000 UTC m=+1242.398224173" watchObservedRunningTime="2026-01-20 16:52:04.166618399 +0000 UTC m=+1242.411223215" Jan 20 16:52:07 crc kubenswrapper[4995]: I0120 16:52:07.730205 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:52:07 crc kubenswrapper[4995]: I0120 16:52:07.730900 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-log" containerID="cri-o://1daf20f0c67b095f761b9f1f0f0a04598d305aeda734d3601e0911d7243afb73" gracePeriod=30 Jan 20 16:52:07 crc kubenswrapper[4995]: I0120 16:52:07.731346 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-httpd" containerID="cri-o://83f8aa16c6e0c99d44dc716b5a2f98a5b1ea1941246e20e15e580378bfb33b78" gracePeriod=30 Jan 20 16:52:08 crc kubenswrapper[4995]: I0120 16:52:08.212531 4995 generic.go:334] "Generic (PLEG): container finished" podID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerID="1daf20f0c67b095f761b9f1f0f0a04598d305aeda734d3601e0911d7243afb73" exitCode=143 Jan 20 16:52:08 crc kubenswrapper[4995]: I0120 16:52:08.212573 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c66738b7-dac3-4487-b128-215f8e2eb48f","Type":"ContainerDied","Data":"1daf20f0c67b095f761b9f1f0f0a04598d305aeda734d3601e0911d7243afb73"} Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.047601 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.048171 4995 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-cell0-conductor-0" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerName="nova-cell0-conductor-conductor" containerID="cri-o://777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.052921 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.054653 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.056139 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.056326 4995 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerName="nova-cell0-conductor-conductor" Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.087978 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.091838 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-log" containerID="cri-o://11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.091943 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-httpd" containerID="cri-o://285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.224049 4995 generic.go:334] "Generic (PLEG): container finished" podID="3edee2ed-6825-49d5-9556-a33a54331f20" containerID="11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b" exitCode=143 Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.224145 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3edee2ed-6825-49d5-9556-a33a54331f20","Type":"ContainerDied","Data":"11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b"} Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.300056 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.300371 4995 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-central-agent" containerID="cri-o://4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.300506 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-notification-agent" containerID="cri-o://bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.300504 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="sg-core" containerID="cri-o://1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: I0120 16:52:09.301347 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="proxy-httpd" containerID="cri-o://6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" gracePeriod=30 Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.527218 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.528613 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.531673 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 20 16:52:09 crc kubenswrapper[4995]: E0120 16:52:09.531738 4995 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerName="nova-cell0-conductor-conductor" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.063361 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.092911 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-log-httpd\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.092971 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-sg-core-conf-yaml\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.093024 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-765wp\" (UniqueName: \"kubernetes.io/projected/19a0bd62-8fb6-4078-af42-ac2f1e919071-kube-api-access-765wp\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.093071 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-config-data\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.093197 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-scripts\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.093260 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-combined-ca-bundle\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.093307 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-run-httpd\") pod \"19a0bd62-8fb6-4078-af42-ac2f1e919071\" (UID: \"19a0bd62-8fb6-4078-af42-ac2f1e919071\") " Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.094602 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.095367 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.100965 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19a0bd62-8fb6-4078-af42-ac2f1e919071-kube-api-access-765wp" (OuterVolumeSpecName: "kube-api-access-765wp") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "kube-api-access-765wp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.105221 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-scripts" (OuterVolumeSpecName: "scripts") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.143357 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.182197 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.195928 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.195967 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.195978 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/19a0bd62-8fb6-4078-af42-ac2f1e919071-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.195988 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.195999 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-765wp\" (UniqueName: \"kubernetes.io/projected/19a0bd62-8fb6-4078-af42-ac2f1e919071-kube-api-access-765wp\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.196012 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.213760 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-config-data" (OuterVolumeSpecName: "config-data") pod "19a0bd62-8fb6-4078-af42-ac2f1e919071" (UID: "19a0bd62-8fb6-4078-af42-ac2f1e919071"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237459 4995 generic.go:334] "Generic (PLEG): container finished" podID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" exitCode=0 Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237509 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerDied","Data":"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5"} Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237537 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237564 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerDied","Data":"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e"} Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237524 4995 generic.go:334] "Generic (PLEG): container finished" podID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" exitCode=2 Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237583 4995 scope.go:117] "RemoveContainer" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237610 4995 generic.go:334] "Generic (PLEG): container finished" podID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" exitCode=0 Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237632 4995 generic.go:334] "Generic (PLEG): container finished" podID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" exitCode=0 Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237656 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerDied","Data":"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3"} Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237875 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerDied","Data":"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa"} Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.237896 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"19a0bd62-8fb6-4078-af42-ac2f1e919071","Type":"ContainerDied","Data":"1cf07431d6f113c829a1e99b6c7a1e713ba5672de0bb10e16389fd631bdb66a4"} Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.297393 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19a0bd62-8fb6-4078-af42-ac2f1e919071-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.314891 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ceilometer-0"] Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.315644 4995 scope.go:117] "RemoveContainer" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.323387 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.340165 4995 scope.go:117] "RemoveContainer" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347065 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.347526 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-central-agent" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347541 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-central-agent" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.347556 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="proxy-httpd" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347562 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="proxy-httpd" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.347588 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="sg-core" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347600 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="sg-core" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.347627 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-notification-agent" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347634 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-notification-agent" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347799 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-notification-agent" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347818 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="ceilometer-central-agent" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347830 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="sg-core" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.347844 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" containerName="proxy-httpd" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.350307 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.352641 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.352642 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.368808 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.373540 4995 scope.go:117] "RemoveContainer" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.398684 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.398753 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.398827 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-run-httpd\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.398908 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-log-httpd\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.399067 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-scripts\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.399179 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-config-data\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.399265 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rmc5\" (UniqueName: \"kubernetes.io/projected/c85c9716-e609-4d13-be5f-19b7867e4b3b-kube-api-access-4rmc5\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.408232 4995 scope.go:117] "RemoveContainer" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 
16:52:10.412189 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": container with ID starting with 6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5 not found: ID does not exist" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.412237 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5"} err="failed to get container status \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": rpc error: code = NotFound desc = could not find container \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": container with ID starting with 6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.412261 4995 scope.go:117] "RemoveContainer" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.412511 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": container with ID starting with 1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e not found: ID does not exist" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.412531 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e"} err="failed to get container status \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": rpc error: code = NotFound desc = could not find container \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": container with ID starting with 1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.412542 4995 scope.go:117] "RemoveContainer" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.412724 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": container with ID starting with bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3 not found: ID does not exist" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.412746 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3"} err="failed to get container status \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": rpc error: code = NotFound desc = could not find container \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": container with ID starting with bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.412760 4995 
scope.go:117] "RemoveContainer" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" Jan 20 16:52:10 crc kubenswrapper[4995]: E0120 16:52:10.413034 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": container with ID starting with 4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa not found: ID does not exist" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.413152 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa"} err="failed to get container status \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": rpc error: code = NotFound desc = could not find container \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": container with ID starting with 4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.413190 4995 scope.go:117] "RemoveContainer" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.413480 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5"} err="failed to get container status \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": rpc error: code = NotFound desc = could not find container \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": container with ID starting with 6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.413507 4995 scope.go:117] "RemoveContainer" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.414024 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e"} err="failed to get container status \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": rpc error: code = NotFound desc = could not find container \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": container with ID starting with 1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.414048 4995 scope.go:117] "RemoveContainer" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.414427 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3"} err="failed to get container status \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": rpc error: code = NotFound desc = could not find container \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": container with ID starting with bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.414452 4995 
scope.go:117] "RemoveContainer" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.417765 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa"} err="failed to get container status \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": rpc error: code = NotFound desc = could not find container \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": container with ID starting with 4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.417800 4995 scope.go:117] "RemoveContainer" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.418170 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5"} err="failed to get container status \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": rpc error: code = NotFound desc = could not find container \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": container with ID starting with 6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.418200 4995 scope.go:117] "RemoveContainer" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.418582 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e"} err="failed to get container status \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": rpc error: code = NotFound desc = could not find container \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": container with ID starting with 1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.418602 4995 scope.go:117] "RemoveContainer" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419236 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3"} err="failed to get container status \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": rpc error: code = NotFound desc = could not find container \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": container with ID starting with bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419256 4995 scope.go:117] "RemoveContainer" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419473 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa"} err="failed to get container status \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": rpc error: code = 
NotFound desc = could not find container \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": container with ID starting with 4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419494 4995 scope.go:117] "RemoveContainer" containerID="6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419760 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5"} err="failed to get container status \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": rpc error: code = NotFound desc = could not find container \"6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5\": container with ID starting with 6e878ca730b0a86bb24c3ec76fc2b98ecd4aac185f2982ba7353752e7e2415a5 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419774 4995 scope.go:117] "RemoveContainer" containerID="1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419952 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e"} err="failed to get container status \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": rpc error: code = NotFound desc = could not find container \"1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e\": container with ID starting with 1e0b7aef87537a3beb93e066f5bf68f0dc41ee223e2b056fc6d77078de7e685e not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.419966 4995 scope.go:117] "RemoveContainer" containerID="bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.420160 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3"} err="failed to get container status \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": rpc error: code = NotFound desc = could not find container \"bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3\": container with ID starting with bbb4811016071f5aba7b814967d0be3cfd490bfce27283b4faacaede924298d3 not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.420175 4995 scope.go:117] "RemoveContainer" containerID="4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.420344 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa"} err="failed to get container status \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": rpc error: code = NotFound desc = could not find container \"4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa\": container with ID starting with 4749dac39c2aa7f8af83b3926baa84547b468d8316eaa1d0bebf63d67a6beffa not found: ID does not exist" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501483 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501551 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501592 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-run-httpd\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501659 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-log-httpd\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501685 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-scripts\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501727 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-config-data\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.501800 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rmc5\" (UniqueName: \"kubernetes.io/projected/c85c9716-e609-4d13-be5f-19b7867e4b3b-kube-api-access-4rmc5\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.502408 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-log-httpd\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.502944 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-run-httpd\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.505202 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-scripts\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.505444 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.505583 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.511830 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-config-data\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.528191 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rmc5\" (UniqueName: \"kubernetes.io/projected/c85c9716-e609-4d13-be5f-19b7867e4b3b-kube-api-access-4rmc5\") pod \"ceilometer-0\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " pod="openstack/ceilometer-0" Jan 20 16:52:10 crc kubenswrapper[4995]: I0120 16:52:10.675784 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.150273 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.248767 4995 generic.go:334] "Generic (PLEG): container finished" podID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerID="83f8aa16c6e0c99d44dc716b5a2f98a5b1ea1941246e20e15e580378bfb33b78" exitCode=0 Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.248830 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c66738b7-dac3-4487-b128-215f8e2eb48f","Type":"ContainerDied","Data":"83f8aa16c6e0c99d44dc716b5a2f98a5b1ea1941246e20e15e580378bfb33b78"} Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.250781 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerStarted","Data":"996f929498d013567addeb3bdaa061e402a6af64cfd7b4460b1185b8037ceed1"} Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.362127 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419620 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-httpd-run\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419662 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-config-data\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419695 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419760 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-public-tls-certs\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419866 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-combined-ca-bundle\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419912 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-scripts\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419958 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-logs\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.419976 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t246w\" (UniqueName: \"kubernetes.io/projected/c66738b7-dac3-4487-b128-215f8e2eb48f-kube-api-access-t246w\") pod \"c66738b7-dac3-4487-b128-215f8e2eb48f\" (UID: \"c66738b7-dac3-4487-b128-215f8e2eb48f\") " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.420585 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.420862 4995 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.420884 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-logs" (OuterVolumeSpecName: "logs") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.428630 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-scripts" (OuterVolumeSpecName: "scripts") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.428644 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.429823 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c66738b7-dac3-4487-b128-215f8e2eb48f-kube-api-access-t246w" (OuterVolumeSpecName: "kube-api-access-t246w") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "kube-api-access-t246w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.472122 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.489219 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.490725 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-config-data" (OuterVolumeSpecName: "config-data") pod "c66738b7-dac3-4487-b128-215f8e2eb48f" (UID: "c66738b7-dac3-4487-b128-215f8e2eb48f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522220 4995 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522249 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522260 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522268 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c66738b7-dac3-4487-b128-215f8e2eb48f-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522277 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t246w\" (UniqueName: \"kubernetes.io/projected/c66738b7-dac3-4487-b128-215f8e2eb48f-kube-api-access-t246w\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522288 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c66738b7-dac3-4487-b128-215f8e2eb48f-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.522312 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.542362 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.624285 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:11 crc kubenswrapper[4995]: I0120 16:52:11.999329 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19a0bd62-8fb6-4078-af42-ac2f1e919071" path="/var/lib/kubelet/pods/19a0bd62-8fb6-4078-af42-ac2f1e919071/volumes" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.265049 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerStarted","Data":"606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893"} Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.267528 4995 generic.go:334] "Generic (PLEG): container finished" podID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" exitCode=0 Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.267594 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3d353583-476a-415d-ad4f-21c4e7db21f4","Type":"ContainerDied","Data":"777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445"} Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.270541 4995 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c66738b7-dac3-4487-b128-215f8e2eb48f","Type":"ContainerDied","Data":"397c15fbb1de49ccf09d0dd2ab62becd0ac14e7f380c22aefc223a9d27683b0a"} Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.270704 4995 scope.go:117] "RemoveContainer" containerID="83f8aa16c6e0c99d44dc716b5a2f98a5b1ea1941246e20e15e580378bfb33b78" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.270864 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.391770 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.413252 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.429336 4995 scope.go:117] "RemoveContainer" containerID="1daf20f0c67b095f761b9f1f0f0a04598d305aeda734d3601e0911d7243afb73" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.429455 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:52:12 crc kubenswrapper[4995]: E0120 16:52:12.429858 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-log" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.429881 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-log" Jan 20 16:52:12 crc kubenswrapper[4995]: E0120 16:52:12.429905 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-httpd" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.429912 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-httpd" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.430098 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-log" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.430115 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" containerName="glance-httpd" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.431108 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.439921 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.440181 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.440922 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561227 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561615 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561649 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561725 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-logs\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561808 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561854 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rqcj\" (UniqueName: \"kubernetes.io/projected/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-kube-api-access-5rqcj\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561891 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.561907 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664455 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-logs\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664559 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664605 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rqcj\" (UniqueName: \"kubernetes.io/projected/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-kube-api-access-5rqcj\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664645 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664662 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664699 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664724 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664748 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.664922 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-logs\") 
pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.665031 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.666033 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.669269 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-scripts\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.670181 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.671676 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-config-data\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.672219 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.682807 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.713964 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rqcj\" (UniqueName: \"kubernetes.io/projected/4a7fd66d-0211-429d-8dfa-7a29ca98ab51-kube-api-access-5rqcj\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.765510 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-config-data\") pod \"3d353583-476a-415d-ad4f-21c4e7db21f4\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.765619 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rq5g\" (UniqueName: \"kubernetes.io/projected/3d353583-476a-415d-ad4f-21c4e7db21f4-kube-api-access-9rq5g\") pod \"3d353583-476a-415d-ad4f-21c4e7db21f4\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.765849 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-combined-ca-bundle\") pod \"3d353583-476a-415d-ad4f-21c4e7db21f4\" (UID: \"3d353583-476a-415d-ad4f-21c4e7db21f4\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.769481 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4a7fd66d-0211-429d-8dfa-7a29ca98ab51\") " pod="openstack/glance-default-external-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.791196 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-config-data" (OuterVolumeSpecName: "config-data") pod "3d353583-476a-415d-ad4f-21c4e7db21f4" (UID: "3d353583-476a-415d-ad4f-21c4e7db21f4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.797198 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d353583-476a-415d-ad4f-21c4e7db21f4-kube-api-access-9rq5g" (OuterVolumeSpecName: "kube-api-access-9rq5g") pod "3d353583-476a-415d-ad4f-21c4e7db21f4" (UID: "3d353583-476a-415d-ad4f-21c4e7db21f4"). InnerVolumeSpecName "kube-api-access-9rq5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.806741 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d353583-476a-415d-ad4f-21c4e7db21f4" (UID: "3d353583-476a-415d-ad4f-21c4e7db21f4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.806934 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.866989 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-scripts\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867069 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-config-data\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867165 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-internal-tls-certs\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867562 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qc6xr\" (UniqueName: \"kubernetes.io/projected/3edee2ed-6825-49d5-9556-a33a54331f20-kube-api-access-qc6xr\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867625 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867659 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-httpd-run\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867702 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-logs\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.867735 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-combined-ca-bundle\") pod \"3edee2ed-6825-49d5-9556-a33a54331f20\" (UID: \"3edee2ed-6825-49d5-9556-a33a54331f20\") " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.868968 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.868995 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d353583-476a-415d-ad4f-21c4e7db21f4-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.869011 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rq5g\" (UniqueName: 
\"kubernetes.io/projected/3d353583-476a-415d-ad4f-21c4e7db21f4-kube-api-access-9rq5g\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.870962 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.871303 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.871838 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-scripts" (OuterVolumeSpecName: "scripts") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.871915 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-logs" (OuterVolumeSpecName: "logs") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.873587 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3edee2ed-6825-49d5-9556-a33a54331f20-kube-api-access-qc6xr" (OuterVolumeSpecName: "kube-api-access-qc6xr") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "kube-api-access-qc6xr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.899503 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.934195 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.936331 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-config-data" (OuterVolumeSpecName: "config-data") pod "3edee2ed-6825-49d5-9556-a33a54331f20" (UID: "3edee2ed-6825-49d5-9556-a33a54331f20"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.970959 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971003 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971021 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971032 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971044 4995 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3edee2ed-6825-49d5-9556-a33a54331f20-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971055 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qc6xr\" (UniqueName: \"kubernetes.io/projected/3edee2ed-6825-49d5-9556-a33a54331f20-kube-api-access-qc6xr\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971108 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 20 16:52:12 crc kubenswrapper[4995]: I0120 16:52:12.971117 4995 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3edee2ed-6825-49d5-9556-a33a54331f20-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.000123 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.066169 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.072716 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.286349 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3d353583-476a-415d-ad4f-21c4e7db21f4","Type":"ContainerDied","Data":"0c193d4dbb143855731ab59cc8f4d83eef326e44eff37c6334c02ccb988f0770"} Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.286584 4995 scope.go:117] "RemoveContainer" containerID="777c7dc563b4256c1ea35372811ccc88b7aed3df963f5c3c8a4a5281f018f445" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.286730 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.296275 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerStarted","Data":"41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc"} Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.300097 4995 generic.go:334] "Generic (PLEG): container finished" podID="3edee2ed-6825-49d5-9556-a33a54331f20" containerID="285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c" exitCode=0 Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.300135 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3edee2ed-6825-49d5-9556-a33a54331f20","Type":"ContainerDied","Data":"285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c"} Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.300160 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3edee2ed-6825-49d5-9556-a33a54331f20","Type":"ContainerDied","Data":"efe2942923dec6a397ae7d80bdd1da1ad0fd91ccb274be31c4b2d0c2ffbfd7f2"} Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.300228 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.339290 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.354707 4995 scope.go:117] "RemoveContainer" containerID="285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.358661 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.381971 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.394205 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: E0120 16:52:13.394674 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerName="nova-cell0-conductor-conductor" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.394689 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerName="nova-cell0-conductor-conductor" Jan 20 16:52:13 crc kubenswrapper[4995]: E0120 16:52:13.394703 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-httpd" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.394712 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-httpd" Jan 20 16:52:13 crc kubenswrapper[4995]: E0120 16:52:13.394735 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-log" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.394743 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-log" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.394988 4995 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" containerName="nova-cell0-conductor-conductor" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.395000 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-log" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.395021 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" containerName="glance-httpd" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.395787 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.399273 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.399500 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-s86mn" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.405238 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.417521 4995 scope.go:117] "RemoveContainer" containerID="11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.420028 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.432660 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.434542 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.439672 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.439860 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.445706 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.480327 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b40b3bf2-fecd-4b7b-8110-7f15651792f3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.480374 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b40b3bf2-fecd-4b7b-8110-7f15651792f3-logs\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.480435 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.480862 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.480920 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.481150 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.481192 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.481246 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-ms9nh\" (UniqueName: \"kubernetes.io/projected/b40b3bf2-fecd-4b7b-8110-7f15651792f3-kube-api-access-ms9nh\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.481320 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.481347 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.481391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqmzh\" (UniqueName: \"kubernetes.io/projected/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-kube-api-access-jqmzh\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.538498 4995 scope.go:117] "RemoveContainer" containerID="285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c" Jan 20 16:52:13 crc kubenswrapper[4995]: E0120 16:52:13.538967 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c\": container with ID starting with 285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c not found: ID does not exist" containerID="285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.539009 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c"} err="failed to get container status \"285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c\": rpc error: code = NotFound desc = could not find container \"285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c\": container with ID starting with 285a5d29e319cd1ef93ba7d54732fa4fc78dcd5dd0f5a6113e60c46a4d9c3d0c not found: ID does not exist" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.539034 4995 scope.go:117] "RemoveContainer" containerID="11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b" Jan 20 16:52:13 crc kubenswrapper[4995]: E0120 16:52:13.539398 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b\": container with ID starting with 11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b not found: ID does not exist" containerID="11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.539428 4995 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b"} err="failed to get container status \"11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b\": rpc error: code = NotFound desc = could not find container \"11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b\": container with ID starting with 11e9464a3ca5bd7dc9e460b0e11f388afd51d2d871947b0be13fe1a60aa2c03b not found: ID does not exist" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583652 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b40b3bf2-fecd-4b7b-8110-7f15651792f3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583702 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b40b3bf2-fecd-4b7b-8110-7f15651792f3-logs\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583747 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583798 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583826 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583916 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583943 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.583966 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms9nh\" (UniqueName: \"kubernetes.io/projected/b40b3bf2-fecd-4b7b-8110-7f15651792f3-kube-api-access-ms9nh\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc 
kubenswrapper[4995]: I0120 16:52:13.584000 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.584019 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.584032 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqmzh\" (UniqueName: \"kubernetes.io/projected/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-kube-api-access-jqmzh\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.584376 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b40b3bf2-fecd-4b7b-8110-7f15651792f3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.584779 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b40b3bf2-fecd-4b7b-8110-7f15651792f3-logs\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.585703 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.589465 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.593010 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.593025 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.593664 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.594777 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.595489 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b40b3bf2-fecd-4b7b-8110-7f15651792f3-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.607600 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms9nh\" (UniqueName: \"kubernetes.io/projected/b40b3bf2-fecd-4b7b-8110-7f15651792f3-kube-api-access-ms9nh\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.618170 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.621797 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqmzh\" (UniqueName: \"kubernetes.io/projected/18edb0dd-f0be-4f0e-b860-cf6cc5b67745-kube-api-access-jqmzh\") pod \"nova-cell0-conductor-0\" (UID: \"18edb0dd-f0be-4f0e-b860-cf6cc5b67745\") " pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.638648 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-internal-api-0\" (UID: \"b40b3bf2-fecd-4b7b-8110-7f15651792f3\") " pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.662627 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:13 crc kubenswrapper[4995]: I0120 16:52:13.723191 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.034584 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d353583-476a-415d-ad4f-21c4e7db21f4" path="/var/lib/kubelet/pods/3d353583-476a-415d-ad4f-21c4e7db21f4/volumes" Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.035667 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3edee2ed-6825-49d5-9556-a33a54331f20" path="/var/lib/kubelet/pods/3edee2ed-6825-49d5-9556-a33a54331f20/volumes" Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.058126 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c66738b7-dac3-4487-b128-215f8e2eb48f" path="/var/lib/kubelet/pods/c66738b7-dac3-4487-b128-215f8e2eb48f/volumes" Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.107464 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 16:52:14 crc kubenswrapper[4995]: W0120 16:52:14.119237 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb40b3bf2_fecd_4b7b_8110_7f15651792f3.slice/crio-9143105bc63c4780df2a533094470dd15f60dc76a55a80f76a1cab31e47a87a9 WatchSource:0}: Error finding container 9143105bc63c4780df2a533094470dd15f60dc76a55a80f76a1cab31e47a87a9: Status 404 returned error can't find the container with id 9143105bc63c4780df2a533094470dd15f60dc76a55a80f76a1cab31e47a87a9 Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.315550 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a7fd66d-0211-429d-8dfa-7a29ca98ab51","Type":"ContainerStarted","Data":"abf1aeb59aebab57a6b485d2181e45601f6b7f552f9e52d4c84065ae67c1c58e"} Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.320013 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerStarted","Data":"af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737"} Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.321306 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b40b3bf2-fecd-4b7b-8110-7f15651792f3","Type":"ContainerStarted","Data":"9143105bc63c4780df2a533094470dd15f60dc76a55a80f76a1cab31e47a87a9"} Jan 20 16:52:14 crc kubenswrapper[4995]: I0120 16:52:14.399966 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.334126 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"18edb0dd-f0be-4f0e-b860-cf6cc5b67745","Type":"ContainerStarted","Data":"0c8ef00c93619b66773be298d49844df0b08f9fed46c1a05f91d72b9e778e469"} Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.334372 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"18edb0dd-f0be-4f0e-b860-cf6cc5b67745","Type":"ContainerStarted","Data":"56690b6f6143dfd3f70a1ac358e6c961a84db9a6c3de3362e4ccda7f930c8fb6"} Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.335185 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.338541 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"4a7fd66d-0211-429d-8dfa-7a29ca98ab51","Type":"ContainerStarted","Data":"d41109645119d780ffe80c051e92b3efa594e4c6069fe5d5bcf64d8458b2d40f"} Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.338591 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4a7fd66d-0211-429d-8dfa-7a29ca98ab51","Type":"ContainerStarted","Data":"31e0f818cec851a3f30f77c4d02f7f6ee015b29f3732e204e7c3e4233ce06072"} Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.346321 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerStarted","Data":"14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65"} Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.346387 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.350895 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b40b3bf2-fecd-4b7b-8110-7f15651792f3","Type":"ContainerStarted","Data":"d810a5ff241ac8ede2def251ebe9ff62025706eae76f8d331296da3e459af481"} Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.360593 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.360569478 podStartE2EDuration="2.360569478s" podCreationTimestamp="2026-01-20 16:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:15.349308663 +0000 UTC m=+1253.593913459" watchObservedRunningTime="2026-01-20 16:52:15.360569478 +0000 UTC m=+1253.605174284" Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.387828 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.487098706 podStartE2EDuration="5.387809047s" podCreationTimestamp="2026-01-20 16:52:10 +0000 UTC" firstStartedPulling="2026-01-20 16:52:11.165972295 +0000 UTC m=+1249.410577101" lastFinishedPulling="2026-01-20 16:52:15.066682626 +0000 UTC m=+1253.311287442" observedRunningTime="2026-01-20 16:52:15.374108475 +0000 UTC m=+1253.618713321" watchObservedRunningTime="2026-01-20 16:52:15.387809047 +0000 UTC m=+1253.632413853" Jan 20 16:52:15 crc kubenswrapper[4995]: I0120 16:52:15.405536 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.405521076 podStartE2EDuration="3.405521076s" podCreationTimestamp="2026-01-20 16:52:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:15.396126672 +0000 UTC m=+1253.640731478" watchObservedRunningTime="2026-01-20 16:52:15.405521076 +0000 UTC m=+1253.650125882" Jan 20 16:52:16 crc kubenswrapper[4995]: I0120 16:52:16.364804 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b40b3bf2-fecd-4b7b-8110-7f15651792f3","Type":"ContainerStarted","Data":"1f57d0bbfae8ca1c50b35e07d8aff6b925fb4715a6332980b95302778165586a"} Jan 20 16:52:16 crc kubenswrapper[4995]: I0120 16:52:16.392840 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" 
podStartSLOduration=3.39281833 podStartE2EDuration="3.39281833s" podCreationTimestamp="2026-01-20 16:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:16.386014515 +0000 UTC m=+1254.630619331" watchObservedRunningTime="2026-01-20 16:52:16.39281833 +0000 UTC m=+1254.637423136" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.066679 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.067297 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.101760 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.112829 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.448385 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.448662 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.663386 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.663471 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.713892 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.717298 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:23 crc kubenswrapper[4995]: I0120 16:52:23.778839 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.260104 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-c4552"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.261763 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.264060 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.264419 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.280125 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-c4552"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.388233 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.388913 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrptn\" (UniqueName: \"kubernetes.io/projected/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-kube-api-access-xrptn\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.389533 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-config-data\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.390042 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-scripts\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.425845 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.430266 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.433012 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.448480 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.461520 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.461550 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.491462 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-config-data\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.491541 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-scripts\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.491606 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.491637 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrptn\" (UniqueName: \"kubernetes.io/projected/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-kube-api-access-xrptn\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.504012 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.504549 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-config-data\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.506659 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-scripts\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.517671 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-xrptn\" (UniqueName: \"kubernetes.io/projected/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-kube-api-access-xrptn\") pod \"nova-cell0-cell-mapping-c4552\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.582373 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.597059 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-config-data\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.598793 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.598851 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mjgl\" (UniqueName: \"kubernetes.io/projected/0c33ee49-46b9-41c7-84c0-2828d35aa505-kube-api-access-6mjgl\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.656405 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.658518 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.669033 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.672273 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.677710 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.686515 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.700022 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.700086 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mjgl\" (UniqueName: \"kubernetes.io/projected/0c33ee49-46b9-41c7-84c0-2828d35aa505-kube-api-access-6mjgl\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.700152 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-config-data\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.706016 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-config-data\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.735638 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.735839 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.750678 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mjgl\" (UniqueName: \"kubernetes.io/projected/0c33ee49-46b9-41c7-84c0-2828d35aa505-kube-api-access-6mjgl\") pod \"nova-scheduler-0\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.760754 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.786431 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.802258 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.802397 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22rg9\" (UniqueName: \"kubernetes.io/projected/b737c895-a034-43d1-80a9-e6e6dd980789-kube-api-access-22rg9\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.802450 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-config-data\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.802488 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cwrn\" (UniqueName: \"kubernetes.io/projected/8c423fbf-ef87-43a8-92ca-374891812cc8-kube-api-access-4cwrn\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.810261 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b737c895-a034-43d1-80a9-e6e6dd980789-logs\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.810323 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c423fbf-ef87-43a8-92ca-374891812cc8-logs\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.810466 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-config-data\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.810513 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.823569 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.824716 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.826608 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.849398 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.859794 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2qxp5"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.861351 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.869928 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2qxp5"] Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.926828 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.926916 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22rg9\" (UniqueName: \"kubernetes.io/projected/b737c895-a034-43d1-80a9-e6e6dd980789-kube-api-access-22rg9\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.926951 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-config-data\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.926977 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cwrn\" (UniqueName: \"kubernetes.io/projected/8c423fbf-ef87-43a8-92ca-374891812cc8-kube-api-access-4cwrn\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.933755 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-config-data\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.933891 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b737c895-a034-43d1-80a9-e6e6dd980789-logs\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.934389 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b737c895-a034-43d1-80a9-e6e6dd980789-logs\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.934903 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/8c423fbf-ef87-43a8-92ca-374891812cc8-logs\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.935121 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-config-data\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.935377 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c423fbf-ef87-43a8-92ca-374891812cc8-logs\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.935613 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.943499 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-config-data\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.944764 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.972452 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.986227 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22rg9\" (UniqueName: \"kubernetes.io/projected/b737c895-a034-43d1-80a9-e6e6dd980789-kube-api-access-22rg9\") pod \"nova-metadata-0\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " pod="openstack/nova-metadata-0" Jan 20 16:52:24 crc kubenswrapper[4995]: I0120 16:52:24.995280 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cwrn\" (UniqueName: \"kubernetes.io/projected/8c423fbf-ef87-43a8-92ca-374891812cc8-kube-api-access-4cwrn\") pod \"nova-api-0\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") " pod="openstack/nova-api-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.041902 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvt9t\" (UniqueName: \"kubernetes.io/projected/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-kube-api-access-dvt9t\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.042033 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-config\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.042494 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.042818 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.042906 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gp72\" (UniqueName: \"kubernetes.io/projected/7df7445f-fe61-4af9-9f97-6edeab0ab979-kube-api-access-6gp72\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.042986 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.043541 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.043642 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-svc\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.043690 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.136929 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.150710 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151383 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151460 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gp72\" (UniqueName: \"kubernetes.io/projected/7df7445f-fe61-4af9-9f97-6edeab0ab979-kube-api-access-6gp72\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151504 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151569 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151594 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-svc\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151616 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151684 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvt9t\" (UniqueName: \"kubernetes.io/projected/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-kube-api-access-dvt9t\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151729 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-config\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.151795 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc 
kubenswrapper[4995]: I0120 16:52:25.153793 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.154626 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.155641 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-svc\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.156133 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-config\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.156272 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.164154 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.166744 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.178561 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gp72\" (UniqueName: \"kubernetes.io/projected/7df7445f-fe61-4af9-9f97-6edeab0ab979-kube-api-access-6gp72\") pod \"dnsmasq-dns-757b4f8459-2qxp5\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.179115 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.180916 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvt9t\" (UniqueName: \"kubernetes.io/projected/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-kube-api-access-dvt9t\") pod \"nova-cell1-novncproxy-0\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.459655 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.470612 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-c4552"] Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.508505 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.538203 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.737490 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:25 crc kubenswrapper[4995]: W0120 16:52:25.765631 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb737c895_a034_43d1_80a9_e6e6dd980789.slice/crio-dc053692beb0cbca7705b43b0b826a7f776696126d8e49cda8f58730e4e55224 WatchSource:0}: Error finding container dc053692beb0cbca7705b43b0b826a7f776696126d8e49cda8f58730e4e55224: Status 404 returned error can't find the container with id dc053692beb0cbca7705b43b0b826a7f776696126d8e49cda8f58730e4e55224 Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.805067 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qcr9x"] Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.806618 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.812830 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.813336 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.827556 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qcr9x"] Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.885329 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzzb6\" (UniqueName: \"kubernetes.io/projected/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-kube-api-access-fzzb6\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.885710 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-scripts\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.885859 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-config-data\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.885953 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.891023 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:52:25 crc kubenswrapper[4995]: W0120 16:52:25.894245 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c423fbf_ef87_43a8_92ca_374891812cc8.slice/crio-0fb528cf0b4e5663fb621fb66a0f39f95854614b5825f0a438fa1c46e5567ef5 WatchSource:0}: Error finding container 0fb528cf0b4e5663fb621fb66a0f39f95854614b5825f0a438fa1c46e5567ef5: Status 404 returned error can't find the container with id 0fb528cf0b4e5663fb621fb66a0f39f95854614b5825f0a438fa1c46e5567ef5 Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.988903 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-config-data\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.988988 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.995125 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzzb6\" (UniqueName: \"kubernetes.io/projected/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-kube-api-access-fzzb6\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.995202 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-scripts\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.995408 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-config-data\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:25 crc kubenswrapper[4995]: I0120 16:52:25.998550 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.000119 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-scripts\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.004613 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2qxp5"] Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.014137 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzzb6\" (UniqueName: \"kubernetes.io/projected/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-kube-api-access-fzzb6\") pod \"nova-cell1-conductor-db-sync-qcr9x\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") " pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.046245 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.046359 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.144022 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.192831 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.424663 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.581008 4995 generic.go:334] "Generic (PLEG): container finished" podID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerID="e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196" exitCode=0 Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.581761 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" event={"ID":"7df7445f-fe61-4af9-9f97-6edeab0ab979","Type":"ContainerDied","Data":"e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.581802 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" event={"ID":"7df7445f-fe61-4af9-9f97-6edeab0ab979","Type":"ContainerStarted","Data":"b99addc256e72c4236da4a7b17bf7701e0313a351f679b0e2305b707233021d8"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.600365 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0c33ee49-46b9-41c7-84c0-2828d35aa505","Type":"ContainerStarted","Data":"7c84e2bca1aa6ef86947dfc60ce554ce5e2e142dcb48e62de4bdb101d69f2e53"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.627418 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-c4552" event={"ID":"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5","Type":"ContainerStarted","Data":"daa09eef2ead9557850538a2a180ef185523c81eb1fcd4785f5965b2917e5edf"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.627721 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-c4552" event={"ID":"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5","Type":"ContainerStarted","Data":"05292e290880be2328141094f47f86e53bf60a98256d4bdc26f84264c176b990"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.630491 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8c423fbf-ef87-43a8-92ca-374891812cc8","Type":"ContainerStarted","Data":"0fb528cf0b4e5663fb621fb66a0f39f95854614b5825f0a438fa1c46e5567ef5"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.631695 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c72f2733-e3bf-4064-9a92-9e802fd8cf9e","Type":"ContainerStarted","Data":"3c2bdfdf682696bac01eee5b672d85023ef5b211086b26094765598640c59adb"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.638144 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b737c895-a034-43d1-80a9-e6e6dd980789","Type":"ContainerStarted","Data":"dc053692beb0cbca7705b43b0b826a7f776696126d8e49cda8f58730e4e55224"} Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.638203 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.638212 4995 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.661607 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-c4552" podStartSLOduration=2.6615874699999997 podStartE2EDuration="2.66158747s" 
podCreationTimestamp="2026-01-20 16:52:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:26.642698808 +0000 UTC m=+1264.887303614" watchObservedRunningTime="2026-01-20 16:52:26.66158747 +0000 UTC m=+1264.906192276" Jan 20 16:52:26 crc kubenswrapper[4995]: I0120 16:52:26.775324 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qcr9x"] Jan 20 16:52:26 crc kubenswrapper[4995]: W0120 16:52:26.799874 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0cf9607f_a24d_47f2_bcde_a777ed54f2a8.slice/crio-e07ab32746bf7b0291e6366a04d823a29db79066f61e0596c77dc9d8a21d2659 WatchSource:0}: Error finding container e07ab32746bf7b0291e6366a04d823a29db79066f61e0596c77dc9d8a21d2659: Status 404 returned error can't find the container with id e07ab32746bf7b0291e6366a04d823a29db79066f61e0596c77dc9d8a21d2659 Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.336291 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.336797 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.669384 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" event={"ID":"0cf9607f-a24d-47f2-bcde-a777ed54f2a8","Type":"ContainerStarted","Data":"2466dc2d36864131fa8bf12c0a882e03e2f357f5041d857fde5e7b5e7ac19837"} Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.669430 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" event={"ID":"0cf9607f-a24d-47f2-bcde-a777ed54f2a8","Type":"ContainerStarted","Data":"e07ab32746bf7b0291e6366a04d823a29db79066f61e0596c77dc9d8a21d2659"} Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.674211 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" event={"ID":"7df7445f-fe61-4af9-9f97-6edeab0ab979","Type":"ContainerStarted","Data":"297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603"} Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.675010 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.721520 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" podStartSLOduration=2.721500711 podStartE2EDuration="2.721500711s" podCreationTimestamp="2026-01-20 16:52:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:27.707407479 +0000 UTC m=+1265.952012315" watchObservedRunningTime="2026-01-20 16:52:27.721500711 +0000 UTC m=+1265.966105517" Jan 20 16:52:27 crc kubenswrapper[4995]: I0120 16:52:27.747416 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" podStartSLOduration=3.747394662 podStartE2EDuration="3.747394662s" podCreationTimestamp="2026-01-20 16:52:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 
16:52:27.736379204 +0000 UTC m=+1265.980984010" watchObservedRunningTime="2026-01-20 16:52:27.747394662 +0000 UTC m=+1265.991999468" Jan 20 16:52:28 crc kubenswrapper[4995]: I0120 16:52:28.329588 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:28 crc kubenswrapper[4995]: I0120 16:52:28.367019 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.698543 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8c423fbf-ef87-43a8-92ca-374891812cc8","Type":"ContainerStarted","Data":"beb6abf5934bded5478dc89e039bd18eea65f10e199fecfb5cb5ae38761c27f1"} Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.699870 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8c423fbf-ef87-43a8-92ca-374891812cc8","Type":"ContainerStarted","Data":"bfcb1dc02ef4fa570ec58c96ed9598e7be7a64980297ac215fd2a1755625f24b"} Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.699952 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c72f2733-e3bf-4064-9a92-9e802fd8cf9e","Type":"ContainerStarted","Data":"ffcaba083debd4d1dc554202803fc5925daa05eb5fdbd7300f004f19ef4760b3"} Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.699925 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="c72f2733-e3bf-4064-9a92-9e802fd8cf9e" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://ffcaba083debd4d1dc554202803fc5925daa05eb5fdbd7300f004f19ef4760b3" gracePeriod=30 Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.703098 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b737c895-a034-43d1-80a9-e6e6dd980789","Type":"ContainerStarted","Data":"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"} Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.703138 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b737c895-a034-43d1-80a9-e6e6dd980789","Type":"ContainerStarted","Data":"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"} Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.703201 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-metadata" containerID="cri-o://dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50" gracePeriod=30 Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.703810 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-log" containerID="cri-o://a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df" gracePeriod=30 Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.705398 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0c33ee49-46b9-41c7-84c0-2828d35aa505","Type":"ContainerStarted","Data":"fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1"} Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.724439 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8630412290000002 
podStartE2EDuration="6.724420463s" podCreationTimestamp="2026-01-20 16:52:24 +0000 UTC" firstStartedPulling="2026-01-20 16:52:25.898900433 +0000 UTC m=+1264.143505239" lastFinishedPulling="2026-01-20 16:52:29.760279667 +0000 UTC m=+1268.004884473" observedRunningTime="2026-01-20 16:52:30.718856512 +0000 UTC m=+1268.963461318" watchObservedRunningTime="2026-01-20 16:52:30.724420463 +0000 UTC m=+1268.969025269"
Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.760002 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.773740119 podStartE2EDuration="6.759980657s" podCreationTimestamp="2026-01-20 16:52:24 +0000 UTC" firstStartedPulling="2026-01-20 16:52:25.775214941 +0000 UTC m=+1264.019819747" lastFinishedPulling="2026-01-20 16:52:29.761455479 +0000 UTC m=+1268.006060285" observedRunningTime="2026-01-20 16:52:30.742491962 +0000 UTC m=+1268.987096838" watchObservedRunningTime="2026-01-20 16:52:30.759980657 +0000 UTC m=+1269.004585463"
Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.775202 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.530338192 podStartE2EDuration="6.775179168s" podCreationTimestamp="2026-01-20 16:52:24 +0000 UTC" firstStartedPulling="2026-01-20 16:52:25.53788354 +0000 UTC m=+1263.782488346" lastFinishedPulling="2026-01-20 16:52:29.782724516 +0000 UTC m=+1268.027329322" observedRunningTime="2026-01-20 16:52:30.764643043 +0000 UTC m=+1269.009247849" watchObservedRunningTime="2026-01-20 16:52:30.775179168 +0000 UTC m=+1269.019783974"
Jan 20 16:52:30 crc kubenswrapper[4995]: I0120 16:52:30.791357 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.193842983 podStartE2EDuration="6.791338126s" podCreationTimestamp="2026-01-20 16:52:24 +0000 UTC" firstStartedPulling="2026-01-20 16:52:26.164018887 +0000 UTC m=+1264.408623693" lastFinishedPulling="2026-01-20 16:52:29.76151403 +0000 UTC m=+1268.006118836" observedRunningTime="2026-01-20 16:52:30.788582802 +0000 UTC m=+1269.033187608" watchObservedRunningTime="2026-01-20 16:52:30.791338126 +0000 UTC m=+1269.035942962"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.363609 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
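The pod_startup_latency_tracker records above carry enough fields to recompute both durations: the numbers are consistent with podStartE2EDuration being watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration being that end-to-end figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A small sketch checking the nova-api-0 record under that reading of the fields; the timestamps are copied verbatim from the record and the layout string matches Go's default time formatting:

package main

import (
	"fmt"
	"time"
)

// must parses a timestamp in the format the tracker logs them in.
func must(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := must("2026-01-20 16:52:24 +0000 UTC")
	firstPull := must("2026-01-20 16:52:25.898900433 +0000 UTC")
	lastPull := must("2026-01-20 16:52:29.760279667 +0000 UTC")
	observed := must("2026-01-20 16:52:30.724420463 +0000 UTC")

	e2e := observed.Sub(created)         // 6.724420463s == podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 2.863041229s == podStartSLOduration
	fmt.Println(e2e, slo)
}

The pull window is 3.861379234s, so 6.724420463s minus it gives exactly the logged podStartSLOduration of 2.8630412290000002.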
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.436458 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22rg9\" (UniqueName: \"kubernetes.io/projected/b737c895-a034-43d1-80a9-e6e6dd980789-kube-api-access-22rg9\") pod \"b737c895-a034-43d1-80a9-e6e6dd980789\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.436578 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-config-data\") pod \"b737c895-a034-43d1-80a9-e6e6dd980789\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.436642 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b737c895-a034-43d1-80a9-e6e6dd980789-logs\") pod \"b737c895-a034-43d1-80a9-e6e6dd980789\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.437102 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b737c895-a034-43d1-80a9-e6e6dd980789-logs" (OuterVolumeSpecName: "logs") pod "b737c895-a034-43d1-80a9-e6e6dd980789" (UID: "b737c895-a034-43d1-80a9-e6e6dd980789"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.437164 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-combined-ca-bundle\") pod \"b737c895-a034-43d1-80a9-e6e6dd980789\" (UID: \"b737c895-a034-43d1-80a9-e6e6dd980789\") " Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.437602 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b737c895-a034-43d1-80a9-e6e6dd980789-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.440596 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b737c895-a034-43d1-80a9-e6e6dd980789-kube-api-access-22rg9" (OuterVolumeSpecName: "kube-api-access-22rg9") pod "b737c895-a034-43d1-80a9-e6e6dd980789" (UID: "b737c895-a034-43d1-80a9-e6e6dd980789"). InnerVolumeSpecName "kube-api-access-22rg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.465343 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b737c895-a034-43d1-80a9-e6e6dd980789" (UID: "b737c895-a034-43d1-80a9-e6e6dd980789"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.485232 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-config-data" (OuterVolumeSpecName: "config-data") pod "b737c895-a034-43d1-80a9-e6e6dd980789" (UID: "b737c895-a034-43d1-80a9-e6e6dd980789"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.539356 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22rg9\" (UniqueName: \"kubernetes.io/projected/b737c895-a034-43d1-80a9-e6e6dd980789-kube-api-access-22rg9\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.539398 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.539414 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b737c895-a034-43d1-80a9-e6e6dd980789-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.720586 4995 generic.go:334] "Generic (PLEG): container finished" podID="b737c895-a034-43d1-80a9-e6e6dd980789" containerID="dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50" exitCode=0 Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.720633 4995 generic.go:334] "Generic (PLEG): container finished" podID="b737c895-a034-43d1-80a9-e6e6dd980789" containerID="a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df" exitCode=143 Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.721902 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.722437 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b737c895-a034-43d1-80a9-e6e6dd980789","Type":"ContainerDied","Data":"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"} Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.722716 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b737c895-a034-43d1-80a9-e6e6dd980789","Type":"ContainerDied","Data":"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"} Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.722961 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b737c895-a034-43d1-80a9-e6e6dd980789","Type":"ContainerDied","Data":"dc053692beb0cbca7705b43b0b826a7f776696126d8e49cda8f58730e4e55224"} Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.723189 4995 scope.go:117] "RemoveContainer" containerID="dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.775299 4995 scope.go:117] "RemoveContainer" containerID="a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.785344 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.805177 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.816573 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:31 crc kubenswrapper[4995]: E0120 16:52:31.817116 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-log" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.817136 4995 
Jan 20 16:52:31 crc kubenswrapper[4995]: E0120 16:52:31.817189 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-metadata"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.817198 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-metadata"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.817426 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-log"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.817451 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" containerName="nova-metadata-metadata"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.818766 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.822665 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.822897 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.825208 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.873337 4995 scope.go:117] "RemoveContainer" containerID="dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"
Jan 20 16:52:31 crc kubenswrapper[4995]: E0120 16:52:31.873882 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50\": container with ID starting with dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50 not found: ID does not exist" containerID="dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.873963 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"} err="failed to get container status \"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50\": rpc error: code = NotFound desc = could not find container \"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50\": container with ID starting with dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50 not found: ID does not exist"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.873997 4995 scope.go:117] "RemoveContainer" containerID="a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"
Jan 20 16:52:31 crc kubenswrapper[4995]: E0120 16:52:31.874472 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df\": container with ID starting with a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df not found: ID does not exist" containerID="a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.874528 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"} err="failed to get container status \"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df\": rpc error: code = NotFound desc = could not find container \"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df\": container with ID starting with a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df not found: ID does not exist"
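The repeated "ContainerStatus from runtime service failed ... NotFound" records above are harmless: the containers were already removed, and the kubelet logs the gRPC NotFound and carries on, so deletion stays idempotent. A minimal sketch of that pattern, using the grpc status package (the "rpc error: code = NotFound" in the records is a gRPC status); runtimeRemove is a hypothetical stand-in for the CRI RemoveContainer call, not kubelet's actual signature:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats a NotFound from the runtime as "already gone",
// so repeated cleanup attempts succeed instead of failing.
func removeContainer(id string, runtimeRemove func(string) error) error {
	err := runtimeRemove(id)
	if err == nil || status.Code(err) == codes.NotFound {
		return nil // NotFound means already removed; deletion is idempotent
	}
	return fmt.Errorf("remove %s: %w", id, err)
}

func main() {
	gone := func(string) error {
		return status.Error(codes.NotFound, "could not find container")
	}
	fmt.Println(removeContainer("dd04c11a", gone)) // prints <nil>: treated as done
}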
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.874548 4995 scope.go:117] "RemoveContainer" containerID="dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.876352 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50"} err="failed to get container status \"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50\": rpc error: code = NotFound desc = could not find container \"dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50\": container with ID starting with dd04c11adc3b8ff96be1f2257bb2d31211f241591432eef1d66f84c72244fc50 not found: ID does not exist"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.876398 4995 scope.go:117] "RemoveContainer" containerID="a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.876732 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df"} err="failed to get container status \"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df\": rpc error: code = NotFound desc = could not find container \"a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df\": container with ID starting with a5f17482b7661ad1af82a2f23e9686ddc7faed7a6ee3919f673b13317dd4f8df not found: ID does not exist"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.947186 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-config-data\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.947251 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4acb44-dc80-4c35-9a58-dd48b82080c5-logs\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.947318 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.947390 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: 
\"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:31 crc kubenswrapper[4995]: I0120 16:52:31.947460 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkgdf\" (UniqueName: \"kubernetes.io/projected/2c4acb44-dc80-4c35-9a58-dd48b82080c5-kube-api-access-tkgdf\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.003593 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b737c895-a034-43d1-80a9-e6e6dd980789" path="/var/lib/kubelet/pods/b737c895-a034-43d1-80a9-e6e6dd980789/volumes" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.049498 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.049577 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.049622 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkgdf\" (UniqueName: \"kubernetes.io/projected/2c4acb44-dc80-4c35-9a58-dd48b82080c5-kube-api-access-tkgdf\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.049692 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-config-data\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.049725 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4acb44-dc80-4c35-9a58-dd48b82080c5-logs\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.050139 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4acb44-dc80-4c35-9a58-dd48b82080c5-logs\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.053733 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.055291 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: 
\"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.063345 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-config-data\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.066939 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkgdf\" (UniqueName: \"kubernetes.io/projected/2c4acb44-dc80-4c35-9a58-dd48b82080c5-kube-api-access-tkgdf\") pod \"nova-metadata-0\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") " pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.173590 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.648973 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:52:32 crc kubenswrapper[4995]: W0120 16:52:32.654327 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c4acb44_dc80_4c35_9a58_dd48b82080c5.slice/crio-4396c54112fc3cc19f89c3db9fe61bf57248e0570175599f7b3f2c9d76c069f9 WatchSource:0}: Error finding container 4396c54112fc3cc19f89c3db9fe61bf57248e0570175599f7b3f2c9d76c069f9: Status 404 returned error can't find the container with id 4396c54112fc3cc19f89c3db9fe61bf57248e0570175599f7b3f2c9d76c069f9 Jan 20 16:52:32 crc kubenswrapper[4995]: I0120 16:52:32.731014 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4acb44-dc80-4c35-9a58-dd48b82080c5","Type":"ContainerStarted","Data":"4396c54112fc3cc19f89c3db9fe61bf57248e0570175599f7b3f2c9d76c069f9"} Jan 20 16:52:33 crc kubenswrapper[4995]: I0120 16:52:33.743280 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4acb44-dc80-4c35-9a58-dd48b82080c5","Type":"ContainerStarted","Data":"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"} Jan 20 16:52:33 crc kubenswrapper[4995]: I0120 16:52:33.743663 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4acb44-dc80-4c35-9a58-dd48b82080c5","Type":"ContainerStarted","Data":"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"} Jan 20 16:52:33 crc kubenswrapper[4995]: I0120 16:52:33.746000 4995 generic.go:334] "Generic (PLEG): container finished" podID="10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" containerID="daa09eef2ead9557850538a2a180ef185523c81eb1fcd4785f5965b2917e5edf" exitCode=0 Jan 20 16:52:33 crc kubenswrapper[4995]: I0120 16:52:33.746040 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-c4552" event={"ID":"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5","Type":"ContainerDied","Data":"daa09eef2ead9557850538a2a180ef185523c81eb1fcd4785f5965b2917e5edf"} Jan 20 16:52:33 crc kubenswrapper[4995]: I0120 16:52:33.762257 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.7622409599999997 podStartE2EDuration="2.76224096s" podCreationTimestamp="2026-01-20 16:52:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-20 16:52:33.759981219 +0000 UTC m=+1272.004586075" watchObservedRunningTime="2026-01-20 16:52:33.76224096 +0000 UTC m=+1272.006845766" Jan 20 16:52:34 crc kubenswrapper[4995]: I0120 16:52:34.755833 4995 generic.go:334] "Generic (PLEG): container finished" podID="0cf9607f-a24d-47f2-bcde-a777ed54f2a8" containerID="2466dc2d36864131fa8bf12c0a882e03e2f357f5041d857fde5e7b5e7ac19837" exitCode=0 Jan 20 16:52:34 crc kubenswrapper[4995]: I0120 16:52:34.756099 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" event={"ID":"0cf9607f-a24d-47f2-bcde-a777ed54f2a8","Type":"ContainerDied","Data":"2466dc2d36864131fa8bf12c0a882e03e2f357f5041d857fde5e7b5e7ac19837"} Jan 20 16:52:34 crc kubenswrapper[4995]: I0120 16:52:34.761698 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 20 16:52:34 crc kubenswrapper[4995]: I0120 16:52:34.761754 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 20 16:52:34 crc kubenswrapper[4995]: I0120 16:52:34.816687 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.110790 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-c4552" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.152292 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.152683 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.180338 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.214742 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-config-data\") pod \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.214814 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-combined-ca-bundle\") pod \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.214965 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-scripts\") pod \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.214994 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrptn\" (UniqueName: \"kubernetes.io/projected/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-kube-api-access-xrptn\") pod \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\" (UID: \"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5\") " Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.231350 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-scripts" (OuterVolumeSpecName: "scripts") pod "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" (UID: "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.247285 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-kube-api-access-xrptn" (OuterVolumeSpecName: "kube-api-access-xrptn") pod "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" (UID: "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5"). InnerVolumeSpecName "kube-api-access-xrptn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.261401 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-4kqz9"] Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.261657 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerName="dnsmasq-dns" containerID="cri-o://b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6" gracePeriod=10 Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.265859 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-config-data" (OuterVolumeSpecName: "config-data") pod "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" (UID: "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.290915 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" (UID: "10aafbd1-4abd-4f5b-b6bd-975177b3e6e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.317085 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.317113 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.317122 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.317130 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrptn\" (UniqueName: \"kubernetes.io/projected/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5-kube-api-access-xrptn\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.461770 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.741213 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.770744 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-c4552" event={"ID":"10aafbd1-4abd-4f5b-b6bd-975177b3e6e5","Type":"ContainerDied","Data":"05292e290880be2328141094f47f86e53bf60a98256d4bdc26f84264c176b990"}
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.770773 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-c4552"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.770794 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05292e290880be2328141094f47f86e53bf60a98256d4bdc26f84264c176b990"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.773278 4995 generic.go:334] "Generic (PLEG): container finished" podID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerID="b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6" exitCode=0
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.773353 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.773348 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" event={"ID":"ef8757f0-901f-4c5f-ac40-85d643918a47","Type":"ContainerDied","Data":"b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6"}
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.773548 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-4kqz9" event={"ID":"ef8757f0-901f-4c5f-ac40-85d643918a47","Type":"ContainerDied","Data":"1b094aced33825a1b27302a7abf62f195ec594f6cabd634fcded6ac71708b718"}
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.773573 4995 scope.go:117] "RemoveContainer" containerID="b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.827546 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-swift-storage-0\") pod \"ef8757f0-901f-4c5f-ac40-85d643918a47\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") "
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.827623 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-config\") pod \"ef8757f0-901f-4c5f-ac40-85d643918a47\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") "
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.827696 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-sb\") pod \"ef8757f0-901f-4c5f-ac40-85d643918a47\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") "
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.827827 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-svc\") pod \"ef8757f0-901f-4c5f-ac40-85d643918a47\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") "
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.827855 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp8rx\" (UniqueName: \"kubernetes.io/projected/ef8757f0-901f-4c5f-ac40-85d643918a47-kube-api-access-qp8rx\") pod \"ef8757f0-901f-4c5f-ac40-85d643918a47\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") "
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.827910 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-nb\") pod \"ef8757f0-901f-4c5f-ac40-85d643918a47\" (UID: \"ef8757f0-901f-4c5f-ac40-85d643918a47\") "
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.837320 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.838725 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef8757f0-901f-4c5f-ac40-85d643918a47-kube-api-access-qp8rx" (OuterVolumeSpecName: "kube-api-access-qp8rx") pod "ef8757f0-901f-4c5f-ac40-85d643918a47" (UID: "ef8757f0-901f-4c5f-ac40-85d643918a47"). InnerVolumeSpecName "kube-api-access-qp8rx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.898414 4995 scope.go:117] "RemoveContainer" containerID="76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.899067 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ef8757f0-901f-4c5f-ac40-85d643918a47" (UID: "ef8757f0-901f-4c5f-ac40-85d643918a47"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.922989 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ef8757f0-901f-4c5f-ac40-85d643918a47" (UID: "ef8757f0-901f-4c5f-ac40-85d643918a47"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.930003 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.930031 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp8rx\" (UniqueName: \"kubernetes.io/projected/ef8757f0-901f-4c5f-ac40-85d643918a47-kube-api-access-qp8rx\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.930043 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.931650 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-config" (OuterVolumeSpecName: "config") pod "ef8757f0-901f-4c5f-ac40-85d643918a47" (UID: "ef8757f0-901f-4c5f-ac40-85d643918a47"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.935287 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ef8757f0-901f-4c5f-ac40-85d643918a47" (UID: "ef8757f0-901f-4c5f-ac40-85d643918a47"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.941230 4995 scope.go:117] "RemoveContainer" containerID="b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6"
Jan 20 16:52:35 crc kubenswrapper[4995]: E0120 16:52:35.944405 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6\": container with ID starting with b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6 not found: ID does not exist" containerID="b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.944452 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6"} err="failed to get container status \"b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6\": rpc error: code = NotFound desc = could not find container \"b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6\": container with ID starting with b192258ba5e2e4d48b89331f0394745285f5eec4532f12f4ce1f0132a8f45dc6 not found: ID does not exist"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.944484 4995 scope.go:117] "RemoveContainer" containerID="76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.958114 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef8757f0-901f-4c5f-ac40-85d643918a47" (UID: "ef8757f0-901f-4c5f-ac40-85d643918a47"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 16:52:35 crc kubenswrapper[4995]: E0120 16:52:35.960031 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06\": container with ID starting with 76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06 not found: ID does not exist" containerID="76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06"
Jan 20 16:52:35 crc kubenswrapper[4995]: I0120 16:52:35.960188 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06"} err="failed to get container status \"76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06\": rpc error: code = NotFound desc = could not find container \"76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06\": container with ID starting with 76c682d5bc1eee4387114aeeb90cee095a7a46926d4626012bb47b4fee6d6f06 not found: ID does not exist"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.024993 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.025299 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-log" containerID="cri-o://bfcb1dc02ef4fa570ec58c96ed9598e7be7a64980297ac215fd2a1755625f24b" gracePeriod=30
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.025445 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-api" containerID="cri-o://beb6abf5934bded5478dc89e039bd18eea65f10e199fecfb5cb5ae38761c27f1" gracePeriod=30
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.032567 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-config\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.032606 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.032618 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8757f0-901f-4c5f-ac40-85d643918a47-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.045953 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.046298 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-log" containerID="cri-o://b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0" gracePeriod=30
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.049039 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-metadata" containerID="cri-o://783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8" gracePeriod=30
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.049662 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.210:8774/\": EOF"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.050123 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.210:8774/\": EOF"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.136449 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-4kqz9"]
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.145493 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-4kqz9"]
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.325439 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qcr9x"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.327006 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.438289 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzzb6\" (UniqueName: \"kubernetes.io/projected/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-kube-api-access-fzzb6\") pod \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.438357 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-config-data\") pod \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.438394 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-combined-ca-bundle\") pod \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.438452 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-scripts\") pod \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\" (UID: \"0cf9607f-a24d-47f2-bcde-a777ed54f2a8\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.443559 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-scripts" (OuterVolumeSpecName: "scripts") pod "0cf9607f-a24d-47f2-bcde-a777ed54f2a8" (UID: "0cf9607f-a24d-47f2-bcde-a777ed54f2a8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.446875 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-kube-api-access-fzzb6" (OuterVolumeSpecName: "kube-api-access-fzzb6") pod "0cf9607f-a24d-47f2-bcde-a777ed54f2a8" (UID: "0cf9607f-a24d-47f2-bcde-a777ed54f2a8"). InnerVolumeSpecName "kube-api-access-fzzb6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.508759 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-config-data" (OuterVolumeSpecName: "config-data") pod "0cf9607f-a24d-47f2-bcde-a777ed54f2a8" (UID: "0cf9607f-a24d-47f2-bcde-a777ed54f2a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.518051 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0cf9607f-a24d-47f2-bcde-a777ed54f2a8" (UID: "0cf9607f-a24d-47f2-bcde-a777ed54f2a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.542944 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzzb6\" (UniqueName: \"kubernetes.io/projected/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-kube-api-access-fzzb6\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.542980 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.542991 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.542999 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cf9607f-a24d-47f2-bcde-a777ed54f2a8-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.622276 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.746055 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-config-data\") pod \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.746158 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-nova-metadata-tls-certs\") pod \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.746271 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4acb44-dc80-4c35-9a58-dd48b82080c5-logs\") pod \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.746355 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkgdf\" (UniqueName: \"kubernetes.io/projected/2c4acb44-dc80-4c35-9a58-dd48b82080c5-kube-api-access-tkgdf\") pod \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.746440 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-combined-ca-bundle\") pod \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\" (UID: \"2c4acb44-dc80-4c35-9a58-dd48b82080c5\") "
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.746819 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c4acb44-dc80-4c35-9a58-dd48b82080c5-logs" (OuterVolumeSpecName: "logs") pod "2c4acb44-dc80-4c35-9a58-dd48b82080c5" (UID: "2c4acb44-dc80-4c35-9a58-dd48b82080c5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.752155 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c4acb44-dc80-4c35-9a58-dd48b82080c5-kube-api-access-tkgdf" (OuterVolumeSpecName: "kube-api-access-tkgdf") pod "2c4acb44-dc80-4c35-9a58-dd48b82080c5" (UID: "2c4acb44-dc80-4c35-9a58-dd48b82080c5"). InnerVolumeSpecName "kube-api-access-tkgdf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.775886 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-config-data" (OuterVolumeSpecName: "config-data") pod "2c4acb44-dc80-4c35-9a58-dd48b82080c5" (UID: "2c4acb44-dc80-4c35-9a58-dd48b82080c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.783564 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-qcr9x"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.783753 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-qcr9x" event={"ID":"0cf9607f-a24d-47f2-bcde-a777ed54f2a8","Type":"ContainerDied","Data":"e07ab32746bf7b0291e6366a04d823a29db79066f61e0596c77dc9d8a21d2659"}
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.783787 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e07ab32746bf7b0291e6366a04d823a29db79066f61e0596c77dc9d8a21d2659"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.785177 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c4acb44-dc80-4c35-9a58-dd48b82080c5" (UID: "2c4acb44-dc80-4c35-9a58-dd48b82080c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.785283 4995 generic.go:334] "Generic (PLEG): container finished" podID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerID="bfcb1dc02ef4fa570ec58c96ed9598e7be7a64980297ac215fd2a1755625f24b" exitCode=143
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.785324 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8c423fbf-ef87-43a8-92ca-374891812cc8","Type":"ContainerDied","Data":"bfcb1dc02ef4fa570ec58c96ed9598e7be7a64980297ac215fd2a1755625f24b"}
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788632 4995 generic.go:334] "Generic (PLEG): container finished" podID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerID="783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8" exitCode=0
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788657 4995 generic.go:334] "Generic (PLEG): container finished" podID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerID="b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0" exitCode=143
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788712 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788914 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4acb44-dc80-4c35-9a58-dd48b82080c5","Type":"ContainerDied","Data":"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"}
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788945 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4acb44-dc80-4c35-9a58-dd48b82080c5","Type":"ContainerDied","Data":"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"}
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788959 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4acb44-dc80-4c35-9a58-dd48b82080c5","Type":"ContainerDied","Data":"4396c54112fc3cc19f89c3db9fe61bf57248e0570175599f7b3f2c9d76c069f9"}
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.788977 4995 scope.go:117] "RemoveContainer" containerID="783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.812539 4995 scope.go:117] "RemoveContainer" containerID="b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.833945 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2c4acb44-dc80-4c35-9a58-dd48b82080c5" (UID: "2c4acb44-dc80-4c35-9a58-dd48b82080c5"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.840560 4995 scope.go:117] "RemoveContainer" containerID="783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.841115 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8\": container with ID starting with 783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8 not found: ID does not exist" containerID="783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.841160 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"} err="failed to get container status \"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8\": rpc error: code = NotFound desc = could not find container \"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8\": container with ID starting with 783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8 not found: ID does not exist"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.841186 4995 scope.go:117] "RemoveContainer" containerID="b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.841625 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0\": container with ID starting with b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0 not found: ID does not exist" containerID="b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.841659 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"} err="failed to get container status \"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0\": rpc error: code = NotFound desc = could not find container \"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0\": container with ID starting with b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0 not found: ID does not exist"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.841680 4995 scope.go:117] "RemoveContainer" containerID="783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.841994 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8"} err="failed to get container status \"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8\": rpc error: code = NotFound desc = could not find container \"783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8\": container with ID starting with 783fdb1cef9d9bb4753b8df4e78fb635f33f6bdca3a3b31ad01cbdd81a1936b8 not found: ID does not exist"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.842042 4995 scope.go:117] "RemoveContainer" containerID="b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.842418 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0"} err="failed to get container status \"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0\": rpc error: code = NotFound desc = could not find container \"b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0\": container with ID starting with b015fa483b0f63a6bc669fbd19d6d2de6386467473d726edaa5de3d53f1919e0 not found: ID does not exist"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.848486 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.848513 4995 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.848525 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4acb44-dc80-4c35-9a58-dd48b82080c5-logs\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.848534 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkgdf\" (UniqueName: \"kubernetes.io/projected/2c4acb44-dc80-4c35-9a58-dd48b82080c5-kube-api-access-tkgdf\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.848544 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4acb44-dc80-4c35-9a58-dd48b82080c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.886932 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.887313 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerName="dnsmasq-dns"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887330 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerName="dnsmasq-dns"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.887339 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" containerName="nova-manage"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887346 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" containerName="nova-manage"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.887361 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-metadata"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887367 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-metadata"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.887386 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cf9607f-a24d-47f2-bcde-a777ed54f2a8" containerName="nova-cell1-conductor-db-sync"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887391 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cf9607f-a24d-47f2-bcde-a777ed54f2a8" containerName="nova-cell1-conductor-db-sync"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.887406 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerName="init"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887411 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerName="init"
Jan 20 16:52:36 crc kubenswrapper[4995]: E0120 16:52:36.887421 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-log"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887428 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-log"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887596 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-metadata"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887615 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cf9607f-a24d-47f2-bcde-a777ed54f2a8" containerName="nova-cell1-conductor-db-sync"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887624 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" containerName="nova-metadata-log"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887631 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" containerName="nova-manage"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.887643 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" containerName="dnsmasq-dns"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.890187 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.892062 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 20 16:52:36 crc kubenswrapper[4995]: I0120 16:52:36.907236 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.052457 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68ec0231-a7a1-45be-afbf-e66cd2a68d38-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.052514 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68ec0231-a7a1-45be-afbf-e66cd2a68d38-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.052633 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vh9p\" (UniqueName: \"kubernetes.io/projected/68ec0231-a7a1-45be-afbf-e66cd2a68d38-kube-api-access-4vh9p\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.124751 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.141176 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.154858 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vh9p\" (UniqueName: \"kubernetes.io/projected/68ec0231-a7a1-45be-afbf-e66cd2a68d38-kube-api-access-4vh9p\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.155722 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68ec0231-a7a1-45be-afbf-e66cd2a68d38-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.155786 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68ec0231-a7a1-45be-afbf-e66cd2a68d38-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.159500 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68ec0231-a7a1-45be-afbf-e66cd2a68d38-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.161891 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/68ec0231-a7a1-45be-afbf-e66cd2a68d38-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.162335 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.164201 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.166200 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.167271 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.180149 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.182350 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vh9p\" (UniqueName: \"kubernetes.io/projected/68ec0231-a7a1-45be-afbf-e66cd2a68d38-kube-api-access-4vh9p\") pod \"nova-cell1-conductor-0\" (UID: \"68ec0231-a7a1-45be-afbf-e66cd2a68d38\") " pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.257381 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.261105 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-config-data\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.261151 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-logs\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.261193 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6n6s\" (UniqueName: \"kubernetes.io/projected/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-kube-api-access-m6n6s\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.261279 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.261299 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.363855 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.363918 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.364020 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-config-data\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.364052 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-logs\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.364117 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6n6s\" (UniqueName: \"kubernetes.io/projected/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-kube-api-access-m6n6s\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.365322 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-logs\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.369659 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.374431 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-config-data\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.380540 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.384728 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6n6s\" (UniqueName: \"kubernetes.io/projected/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-kube-api-access-m6n6s\") pod \"nova-metadata-0\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.558277 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.706716 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.800192 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"68ec0231-a7a1-45be-afbf-e66cd2a68d38","Type":"ContainerStarted","Data":"f1b6cfb7dde4cb7b8f7489f9f3d187fe81a021a6d60317a9db000722d36e493b"}
Jan 20 16:52:37 crc kubenswrapper[4995]: I0120 16:52:37.802125 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0c33ee49-46b9-41c7-84c0-2828d35aa505" containerName="nova-scheduler-scheduler" containerID="cri-o://fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1" gracePeriod=30
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.001112 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c4acb44-dc80-4c35-9a58-dd48b82080c5" path="/var/lib/kubelet/pods/2c4acb44-dc80-4c35-9a58-dd48b82080c5/volumes"
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.002270 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef8757f0-901f-4c5f-ac40-85d643918a47" path="/var/lib/kubelet/pods/ef8757f0-901f-4c5f-ac40-85d643918a47/volumes"
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.007686 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 20 16:52:38 crc kubenswrapper[4995]: W0120 16:52:38.018411 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod879d57fe_7d15_4279_b4b2_cd5f16db5ac9.slice/crio-91e278fc9b4e994cab02999ea2f9d709d466691e031be40fa0223cf190c07937 WatchSource:0}: Error finding container 91e278fc9b4e994cab02999ea2f9d709d466691e031be40fa0223cf190c07937: Status 404 returned error can't find the container with id 91e278fc9b4e994cab02999ea2f9d709d466691e031be40fa0223cf190c07937
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.813783 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"879d57fe-7d15-4279-b4b2-cd5f16db5ac9","Type":"ContainerStarted","Data":"c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914"}
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.813831 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"879d57fe-7d15-4279-b4b2-cd5f16db5ac9","Type":"ContainerStarted","Data":"2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0"}
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.813847 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"879d57fe-7d15-4279-b4b2-cd5f16db5ac9","Type":"ContainerStarted","Data":"91e278fc9b4e994cab02999ea2f9d709d466691e031be40fa0223cf190c07937"}
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.815480 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"68ec0231-a7a1-45be-afbf-e66cd2a68d38","Type":"ContainerStarted","Data":"84938daf435dc0842a94c6507cecd5c1bb1b76aea45efa9734d1cfa158a3851c"}
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.815923 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.832654 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.832624566 podStartE2EDuration="1.832624566s" podCreationTimestamp="2026-01-20 16:52:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:38.828662139 +0000 UTC m=+1277.073266945" watchObservedRunningTime="2026-01-20 16:52:38.832624566 +0000 UTC m=+1277.077229372"
Jan 20 16:52:38 crc kubenswrapper[4995]: I0120 16:52:38.856312 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.856292748 podStartE2EDuration="2.856292748s" podCreationTimestamp="2026-01-20 16:52:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:38.845642269 +0000 UTC m=+1277.090247085" watchObservedRunningTime="2026-01-20 16:52:38.856292748 +0000 UTC m=+1277.100897554"
Jan 20 16:52:39 crc kubenswrapper[4995]: E0120 16:52:39.764275 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 20 16:52:39 crc kubenswrapper[4995]: E0120 16:52:39.765741 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 20 16:52:39 crc kubenswrapper[4995]: E0120 16:52:39.767605 4995 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 20 16:52:39 crc kubenswrapper[4995]: E0120 16:52:39.767643 4995 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0c33ee49-46b9-41c7-84c0-2828d35aa505" containerName="nova-scheduler-scheduler"
Jan 20 16:52:40 crc kubenswrapper[4995]: I0120 16:52:40.682547 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 20 16:52:41 crc kubenswrapper[4995]: I0120 16:52:41.870219 4995 generic.go:334] "Generic (PLEG): container finished" podID="0c33ee49-46b9-41c7-84c0-2828d35aa505" containerID="fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1" exitCode=0
Jan 20 16:52:41 crc kubenswrapper[4995]: I0120 16:52:41.870900 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0c33ee49-46b9-41c7-84c0-2828d35aa505","Type":"ContainerDied","Data":"fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1"}
Jan 20 16:52:41 crc kubenswrapper[4995]: I0120 16:52:41.873363 4995 generic.go:334] "Generic (PLEG): container finished" podID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerID="beb6abf5934bded5478dc89e039bd18eea65f10e199fecfb5cb5ae38761c27f1" exitCode=0
Jan 20 16:52:41 crc kubenswrapper[4995]: I0120 16:52:41.873395 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8c423fbf-ef87-43a8-92ca-374891812cc8","Type":"ContainerDied","Data":"beb6abf5934bded5478dc89e039bd18eea65f10e199fecfb5cb5ae38761c27f1"}
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.040957 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.047162 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161383 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-config-data\") pod \"8c423fbf-ef87-43a8-92ca-374891812cc8\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161443 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-combined-ca-bundle\") pod \"0c33ee49-46b9-41c7-84c0-2828d35aa505\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161491 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cwrn\" (UniqueName: \"kubernetes.io/projected/8c423fbf-ef87-43a8-92ca-374891812cc8-kube-api-access-4cwrn\") pod \"8c423fbf-ef87-43a8-92ca-374891812cc8\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161692 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mjgl\" (UniqueName: \"kubernetes.io/projected/0c33ee49-46b9-41c7-84c0-2828d35aa505-kube-api-access-6mjgl\") pod \"0c33ee49-46b9-41c7-84c0-2828d35aa505\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161780 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-config-data\") pod \"0c33ee49-46b9-41c7-84c0-2828d35aa505\" (UID: \"0c33ee49-46b9-41c7-84c0-2828d35aa505\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161918 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-combined-ca-bundle\") pod \"8c423fbf-ef87-43a8-92ca-374891812cc8\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.161962 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c423fbf-ef87-43a8-92ca-374891812cc8-logs\") pod \"8c423fbf-ef87-43a8-92ca-374891812cc8\" (UID: \"8c423fbf-ef87-43a8-92ca-374891812cc8\") "
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.162751 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c423fbf-ef87-43a8-92ca-374891812cc8-logs" (OuterVolumeSpecName: "logs") pod "8c423fbf-ef87-43a8-92ca-374891812cc8" (UID: "8c423fbf-ef87-43a8-92ca-374891812cc8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.163834 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8c423fbf-ef87-43a8-92ca-374891812cc8-logs\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.173409 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c423fbf-ef87-43a8-92ca-374891812cc8-kube-api-access-4cwrn" (OuterVolumeSpecName: "kube-api-access-4cwrn") pod "8c423fbf-ef87-43a8-92ca-374891812cc8" (UID: "8c423fbf-ef87-43a8-92ca-374891812cc8"). InnerVolumeSpecName "kube-api-access-4cwrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.174145 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c33ee49-46b9-41c7-84c0-2828d35aa505-kube-api-access-6mjgl" (OuterVolumeSpecName: "kube-api-access-6mjgl") pod "0c33ee49-46b9-41c7-84c0-2828d35aa505" (UID: "0c33ee49-46b9-41c7-84c0-2828d35aa505"). InnerVolumeSpecName "kube-api-access-6mjgl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.197573 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-config-data" (OuterVolumeSpecName: "config-data") pod "0c33ee49-46b9-41c7-84c0-2828d35aa505" (UID: "0c33ee49-46b9-41c7-84c0-2828d35aa505"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.199064 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c423fbf-ef87-43a8-92ca-374891812cc8" (UID: "8c423fbf-ef87-43a8-92ca-374891812cc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.211377 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c33ee49-46b9-41c7-84c0-2828d35aa505" (UID: "0c33ee49-46b9-41c7-84c0-2828d35aa505"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.214135 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-config-data" (OuterVolumeSpecName: "config-data") pod "8c423fbf-ef87-43a8-92ca-374891812cc8" (UID: "8c423fbf-ef87-43a8-92ca-374891812cc8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.267221 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.267257 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c423fbf-ef87-43a8-92ca-374891812cc8-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.267266 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.267275 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cwrn\" (UniqueName: \"kubernetes.io/projected/8c423fbf-ef87-43a8-92ca-374891812cc8-kube-api-access-4cwrn\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.267288 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mjgl\" (UniqueName: \"kubernetes.io/projected/0c33ee49-46b9-41c7-84c0-2828d35aa505-kube-api-access-6mjgl\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.267296 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c33ee49-46b9-41c7-84c0-2828d35aa505-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.286019 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.558396 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.558959 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.881586 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0c33ee49-46b9-41c7-84c0-2828d35aa505","Type":"ContainerDied","Data":"7c84e2bca1aa6ef86947dfc60ce554ce5e2e142dcb48e62de4bdb101d69f2e53"}
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.881601 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.881633 4995 scope.go:117] "RemoveContainer" containerID="fcbd88eb50da2f98a1d828ba593263b64b8b98d5c58f7646dd278a5a44d11cb1"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.884236 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8c423fbf-ef87-43a8-92ca-374891812cc8","Type":"ContainerDied","Data":"0fb528cf0b4e5663fb621fb66a0f39f95854614b5825f0a438fa1c46e5567ef5"}
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.884357 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.916042 4995 scope.go:117] "RemoveContainer" containerID="beb6abf5934bded5478dc89e039bd18eea65f10e199fecfb5cb5ae38761c27f1"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.938643 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.941307 4995 scope.go:117] "RemoveContainer" containerID="bfcb1dc02ef4fa570ec58c96ed9598e7be7a64980297ac215fd2a1755625f24b"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.952175 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.963153 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.974202 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.987480 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 20 16:52:42 crc kubenswrapper[4995]: E0120 16:52:42.987906 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-log"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.987950 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-log"
Jan 20 16:52:42 crc kubenswrapper[4995]: E0120 16:52:42.988010 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-api"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.988019 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-api"
Jan 20 16:52:42 crc kubenswrapper[4995]: E0120 16:52:42.988048 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c33ee49-46b9-41c7-84c0-2828d35aa505" containerName="nova-scheduler-scheduler"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.988057 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c33ee49-46b9-41c7-84c0-2828d35aa505" containerName="nova-scheduler-scheduler"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.988294 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c33ee49-46b9-41c7-84c0-2828d35aa505" containerName="nova-scheduler-scheduler"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.988320 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-log"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.988340 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" containerName="nova-api-api"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.988958 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 20 16:52:42 crc kubenswrapper[4995]: I0120 16:52:42.991840 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.001211 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.011906 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.013555 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.022201 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.022499 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.183977 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm9p6\" (UniqueName: \"kubernetes.io/projected/41eb2c3c-0030-4feb-a046-26b558595460-kube-api-access-nm9p6\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.184228 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.184264 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-config-data\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.184389 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82c2fe85-b2a1-49b9-88d3-811aefa72dab-logs\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.184417 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-config-data\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.184449 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6nc8\" (UniqueName: \"kubernetes.io/projected/82c2fe85-b2a1-49b9-88d3-811aefa72dab-kube-api-access-j6nc8\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.184518 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.286765 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm9p6\" (UniqueName: \"kubernetes.io/projected/41eb2c3c-0030-4feb-a046-26b558595460-kube-api-access-nm9p6\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.286899 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.286930 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-config-data\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.287008 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82c2fe85-b2a1-49b9-88d3-811aefa72dab-logs\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.287027 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-config-data\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.287054 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6nc8\" (UniqueName: \"kubernetes.io/projected/82c2fe85-b2a1-49b9-88d3-811aefa72dab-kube-api-access-j6nc8\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.287162 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.287606 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82c2fe85-b2a1-49b9-88d3-811aefa72dab-logs\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.291656 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0"
Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.291746 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-config-data\") pod \"nova-scheduler-0\"
(UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.292231 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-config-data\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.296103 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.303879 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm9p6\" (UniqueName: \"kubernetes.io/projected/41eb2c3c-0030-4feb-a046-26b558595460-kube-api-access-nm9p6\") pod \"nova-scheduler-0\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " pod="openstack/nova-scheduler-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.307746 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6nc8\" (UniqueName: \"kubernetes.io/projected/82c2fe85-b2a1-49b9-88d3-811aefa72dab-kube-api-access-j6nc8\") pod \"nova-api-0\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " pod="openstack/nova-api-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.311754 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.338584 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.838923 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.893315 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41eb2c3c-0030-4feb-a046-26b558595460","Type":"ContainerStarted","Data":"63ba2da10a9687266413ad9108857eda38e2ad184bdc15d92790bb0c100255dc"} Jan 20 16:52:43 crc kubenswrapper[4995]: I0120 16:52:43.934091 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:52:43 crc kubenswrapper[4995]: W0120 16:52:43.938037 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82c2fe85_b2a1_49b9_88d3_811aefa72dab.slice/crio-3d8411c72a5bed73fec7688703d56d1e06e402a31925b2574ff48be689e18a0c WatchSource:0}: Error finding container 3d8411c72a5bed73fec7688703d56d1e06e402a31925b2574ff48be689e18a0c: Status 404 returned error can't find the container with id 3d8411c72a5bed73fec7688703d56d1e06e402a31925b2574ff48be689e18a0c Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.005412 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c33ee49-46b9-41c7-84c0-2828d35aa505" path="/var/lib/kubelet/pods/0c33ee49-46b9-41c7-84c0-2828d35aa505/volumes" Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.006941 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c423fbf-ef87-43a8-92ca-374891812cc8" path="/var/lib/kubelet/pods/8c423fbf-ef87-43a8-92ca-374891812cc8/volumes" Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.908441 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82c2fe85-b2a1-49b9-88d3-811aefa72dab","Type":"ContainerStarted","Data":"b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e"} Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.910016 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82c2fe85-b2a1-49b9-88d3-811aefa72dab","Type":"ContainerStarted","Data":"500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935"} Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.910166 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82c2fe85-b2a1-49b9-88d3-811aefa72dab","Type":"ContainerStarted","Data":"3d8411c72a5bed73fec7688703d56d1e06e402a31925b2574ff48be689e18a0c"} Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.913891 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41eb2c3c-0030-4feb-a046-26b558595460","Type":"ContainerStarted","Data":"28c557cb7e66ee02aab7ab9c869fcd4add944ca116d13d270a33a33b92c43c9f"} Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.954564 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.954824 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="1650b6ff-e786-4244-bb71-5611bc85fa90" containerName="kube-state-metrics" containerID="cri-o://d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090" gracePeriod=30 Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.963991 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-api-0" podStartSLOduration=2.963962021 podStartE2EDuration="2.963962021s" podCreationTimestamp="2026-01-20 16:52:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:44.956597171 +0000 UTC m=+1283.201201977" watchObservedRunningTime="2026-01-20 16:52:44.963962021 +0000 UTC m=+1283.208566827" Jan 20 16:52:44 crc kubenswrapper[4995]: I0120 16:52:44.990232 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.990203642 podStartE2EDuration="2.990203642s" podCreationTimestamp="2026-01-20 16:52:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:52:44.972938434 +0000 UTC m=+1283.217543260" watchObservedRunningTime="2026-01-20 16:52:44.990203642 +0000 UTC m=+1283.234808458" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.447934 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.567617 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwzhs\" (UniqueName: \"kubernetes.io/projected/1650b6ff-e786-4244-bb71-5611bc85fa90-kube-api-access-vwzhs\") pod \"1650b6ff-e786-4244-bb71-5611bc85fa90\" (UID: \"1650b6ff-e786-4244-bb71-5611bc85fa90\") " Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.573382 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1650b6ff-e786-4244-bb71-5611bc85fa90-kube-api-access-vwzhs" (OuterVolumeSpecName: "kube-api-access-vwzhs") pod "1650b6ff-e786-4244-bb71-5611bc85fa90" (UID: "1650b6ff-e786-4244-bb71-5611bc85fa90"). InnerVolumeSpecName "kube-api-access-vwzhs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.669923 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwzhs\" (UniqueName: \"kubernetes.io/projected/1650b6ff-e786-4244-bb71-5611bc85fa90-kube-api-access-vwzhs\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.931606 4995 generic.go:334] "Generic (PLEG): container finished" podID="1650b6ff-e786-4244-bb71-5611bc85fa90" containerID="d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090" exitCode=2 Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.932924 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.933803 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1650b6ff-e786-4244-bb71-5611bc85fa90","Type":"ContainerDied","Data":"d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090"} Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.933876 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"1650b6ff-e786-4244-bb71-5611bc85fa90","Type":"ContainerDied","Data":"d45bf6dc5d45302cd6c6c7c1457831ec63c15dee3af457398a44c2fe2bac6b97"} Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.933896 4995 scope.go:117] "RemoveContainer" containerID="d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.969272 4995 scope.go:117] "RemoveContainer" containerID="d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090" Jan 20 16:52:45 crc kubenswrapper[4995]: E0120 16:52:45.969960 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090\": container with ID starting with d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090 not found: ID does not exist" containerID="d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.970000 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090"} err="failed to get container status \"d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090\": rpc error: code = NotFound desc = could not find container \"d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090\": container with ID starting with d4daaf9574d17ee50b03d63ef32e30279ecd632ada7946fd5b0d0b44fed96090 not found: ID does not exist" Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.980434 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:52:45 crc kubenswrapper[4995]: I0120 16:52:45.987920 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.002667 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1650b6ff-e786-4244-bb71-5611bc85fa90" path="/var/lib/kubelet/pods/1650b6ff-e786-4244-bb71-5611bc85fa90/volumes" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.006358 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:52:46 crc kubenswrapper[4995]: E0120 16:52:46.006711 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1650b6ff-e786-4244-bb71-5611bc85fa90" containerName="kube-state-metrics" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.006722 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1650b6ff-e786-4244-bb71-5611bc85fa90" containerName="kube-state-metrics" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.006894 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1650b6ff-e786-4244-bb71-5611bc85fa90" containerName="kube-state-metrics" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.007594 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.011602 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.012551 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.045855 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.081676 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.081747 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9tss\" (UniqueName: \"kubernetes.io/projected/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-api-access-p9tss\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.081897 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.081956 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.183941 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.184155 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.184224 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9tss\" (UniqueName: \"kubernetes.io/projected/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-api-access-p9tss\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.184281 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.187622 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.187724 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.197182 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.203031 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9tss\" (UniqueName: \"kubernetes.io/projected/0e5570d0-a1c7-46f5-a5f6-529ad06cf05f-kube-api-access-p9tss\") pod \"kube-state-metrics-0\" (UID: \"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f\") " pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.335876 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.821255 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.877517 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.877803 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-central-agent" containerID="cri-o://606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893" gracePeriod=30 Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.877853 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-notification-agent" containerID="cri-o://41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc" gracePeriod=30 Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.877811 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="proxy-httpd" containerID="cri-o://14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65" gracePeriod=30 Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.877880 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="sg-core" containerID="cri-o://af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737" gracePeriod=30 Jan 20 16:52:46 crc kubenswrapper[4995]: I0120 16:52:46.943669 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f","Type":"ContainerStarted","Data":"ddc03c9358e16d1ded59f8b8e647fe1a3004512a167789d40c2d43d57c0d259c"} Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.559553 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.559796 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.952186 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"0e5570d0-a1c7-46f5-a5f6-529ad06cf05f","Type":"ContainerStarted","Data":"d61d6cef6fca0529c65f44c34e9efeb13635a69001b7fc5c3f526ccc128b5faa"} Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.953541 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.956017 4995 generic.go:334] "Generic (PLEG): container finished" podID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerID="14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65" exitCode=0 Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.956042 4995 generic.go:334] "Generic (PLEG): container finished" podID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerID="af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737" exitCode=2 Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.956054 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerID="606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893" exitCode=0 Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.956094 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerDied","Data":"14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65"} Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.956114 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerDied","Data":"af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737"} Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.956126 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerDied","Data":"606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893"} Jan 20 16:52:47 crc kubenswrapper[4995]: I0120 16:52:47.974630 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.555748713 podStartE2EDuration="2.974606733s" podCreationTimestamp="2026-01-20 16:52:45 +0000 UTC" firstStartedPulling="2026-01-20 16:52:46.824885388 +0000 UTC m=+1285.069490184" lastFinishedPulling="2026-01-20 16:52:47.243743398 +0000 UTC m=+1285.488348204" observedRunningTime="2026-01-20 16:52:47.974117459 +0000 UTC m=+1286.218722285" watchObservedRunningTime="2026-01-20 16:52:47.974606733 +0000 UTC m=+1286.219211559" Jan 20 16:52:48 crc kubenswrapper[4995]: I0120 16:52:48.312469 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 20 16:52:48 crc kubenswrapper[4995]: I0120 16:52:48.573231 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.216:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 16:52:48 crc kubenswrapper[4995]: I0120 16:52:48.573244 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.216:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.384543 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443449 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-combined-ca-bundle\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443527 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-sg-core-conf-yaml\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443589 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-scripts\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443618 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rmc5\" (UniqueName: \"kubernetes.io/projected/c85c9716-e609-4d13-be5f-19b7867e4b3b-kube-api-access-4rmc5\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443656 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-log-httpd\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443686 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-config-data\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.443703 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-run-httpd\") pod \"c85c9716-e609-4d13-be5f-19b7867e4b3b\" (UID: \"c85c9716-e609-4d13-be5f-19b7867e4b3b\") " Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.444467 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.445594 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.450798 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c85c9716-e609-4d13-be5f-19b7867e4b3b-kube-api-access-4rmc5" (OuterVolumeSpecName: "kube-api-access-4rmc5") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "kube-api-access-4rmc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.465338 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-scripts" (OuterVolumeSpecName: "scripts") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.492612 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.545988 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.546028 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.546124 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rmc5\" (UniqueName: \"kubernetes.io/projected/c85c9716-e609-4d13-be5f-19b7867e4b3b-kube-api-access-4rmc5\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.546138 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.546149 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c85c9716-e609-4d13-be5f-19b7867e4b3b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.562945 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.577103 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-config-data" (OuterVolumeSpecName: "config-data") pod "c85c9716-e609-4d13-be5f-19b7867e4b3b" (UID: "c85c9716-e609-4d13-be5f-19b7867e4b3b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.647869 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.647903 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c85c9716-e609-4d13-be5f-19b7867e4b3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.978307 4995 generic.go:334] "Generic (PLEG): container finished" podID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerID="41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc" exitCode=0 Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.978349 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.978368 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerDied","Data":"41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc"} Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.978609 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c85c9716-e609-4d13-be5f-19b7867e4b3b","Type":"ContainerDied","Data":"996f929498d013567addeb3bdaa061e402a6af64cfd7b4460b1185b8037ceed1"} Jan 20 16:52:49 crc kubenswrapper[4995]: I0120 16:52:49.978652 4995 scope.go:117] "RemoveContainer" containerID="14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.117797 4995 scope.go:117] "RemoveContainer" containerID="af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.133051 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.162625 4995 scope.go:117] "RemoveContainer" containerID="41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.176646 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.191678 4995 scope.go:117] "RemoveContainer" containerID="606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.193553 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.194184 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="sg-core" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194209 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="sg-core" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.194222 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-notification-agent" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194229 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" 
containerName="ceilometer-notification-agent" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.194255 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-central-agent" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194264 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-central-agent" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.194290 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="proxy-httpd" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194299 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="proxy-httpd" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194523 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="proxy-httpd" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194545 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="sg-core" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194558 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-central-agent" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.194578 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" containerName="ceilometer-notification-agent" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.197316 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.202448 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.202501 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.202448 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.207159 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.219350 4995 scope.go:117] "RemoveContainer" containerID="14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.220767 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65\": container with ID starting with 14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65 not found: ID does not exist" containerID="14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.220802 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65"} err="failed to get container status \"14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65\": rpc error: code = NotFound desc = could not find container \"14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65\": container with ID starting with 14bcf95a04a9c1f71c8a50c79b1928eb2c6c8be54528388ef0b4c3c106bcde65 not found: ID does not exist" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.220823 4995 scope.go:117] "RemoveContainer" containerID="af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.221095 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737\": container with ID starting with af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737 not found: ID does not exist" containerID="af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.221118 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737"} err="failed to get container status \"af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737\": rpc error: code = NotFound desc = could not find container \"af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737\": container with ID starting with af7a1215b2181088586f9ff08cd5227b46ec0390668e53b0065da63a85312737 not found: ID does not exist" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.221131 4995 scope.go:117] "RemoveContainer" containerID="41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.221418 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc\": container with ID starting with 41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc not found: ID does not exist" containerID="41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.221456 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc"} err="failed to get container status \"41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc\": rpc error: code = NotFound desc = could not find container \"41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc\": container with ID starting with 41a23c97f984353326154b98460892ca6fc92ad44fc74d389cc17a3dcfddb2dc not found: ID does not exist" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.221481 4995 scope.go:117] "RemoveContainer" containerID="606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893" Jan 20 16:52:50 crc kubenswrapper[4995]: E0120 16:52:50.221742 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893\": container with ID starting with 606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893 not found: ID does not exist" containerID="606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.221763 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893"} err="failed to get container status \"606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893\": rpc error: code = NotFound desc = could not find container \"606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893\": container with ID starting with 606bbb54bd486a726096363d84a2159c6c77b15cfd7f3517224eb158f55ba893 not found: ID does not exist" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.386591 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-config-data\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.387163 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dj9d5\" (UniqueName: \"kubernetes.io/projected/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-kube-api-access-dj9d5\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.387345 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.387533 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-run-httpd\") pod \"ceilometer-0\" (UID: 
\"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.387697 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-log-httpd\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.387906 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.387945 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-scripts\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.388108 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.491234 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dj9d5\" (UniqueName: \"kubernetes.io/projected/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-kube-api-access-dj9d5\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.491386 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.491537 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-run-httpd\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.491701 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-log-httpd\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.491829 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.491892 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-scripts\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.492029 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.492109 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-run-httpd\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.492115 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-log-httpd\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.492244 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-config-data\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.496705 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-scripts\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.496753 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.497508 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.499734 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-config-data\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.506838 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.512405 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dj9d5\" (UniqueName: 
\"kubernetes.io/projected/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-kube-api-access-dj9d5\") pod \"ceilometer-0\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " pod="openstack/ceilometer-0" Jan 20 16:52:50 crc kubenswrapper[4995]: I0120 16:52:50.521562 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:52:51 crc kubenswrapper[4995]: W0120 16:52:51.022135 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9fd1e30_87d4_40e7_8ef7_c08e593997c8.slice/crio-82cc3af88bbe239f91fd4ea9473cb6de81a89697ba772ac51d712aebc7a2dc4c WatchSource:0}: Error finding container 82cc3af88bbe239f91fd4ea9473cb6de81a89697ba772ac51d712aebc7a2dc4c: Status 404 returned error can't find the container with id 82cc3af88bbe239f91fd4ea9473cb6de81a89697ba772ac51d712aebc7a2dc4c Jan 20 16:52:51 crc kubenswrapper[4995]: I0120 16:52:51.022556 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:52:52 crc kubenswrapper[4995]: I0120 16:52:52.007942 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c85c9716-e609-4d13-be5f-19b7867e4b3b" path="/var/lib/kubelet/pods/c85c9716-e609-4d13-be5f-19b7867e4b3b/volumes" Jan 20 16:52:52 crc kubenswrapper[4995]: I0120 16:52:52.013187 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerStarted","Data":"cf90988bf391997d6f8eeea8bafbc498206d17ad0cff37a439ffda51cc0995f8"} Jan 20 16:52:52 crc kubenswrapper[4995]: I0120 16:52:52.013375 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerStarted","Data":"82cc3af88bbe239f91fd4ea9473cb6de81a89697ba772ac51d712aebc7a2dc4c"} Jan 20 16:52:53 crc kubenswrapper[4995]: I0120 16:52:53.312054 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 20 16:52:53 crc kubenswrapper[4995]: I0120 16:52:53.339520 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 16:52:53 crc kubenswrapper[4995]: I0120 16:52:53.339593 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 16:52:53 crc kubenswrapper[4995]: I0120 16:52:53.355731 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 20 16:52:54 crc kubenswrapper[4995]: I0120 16:52:54.031753 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerStarted","Data":"06c0cbea93f08b60f9573ceec4c838d385d07385eddd35d4a8f604e0f65a6797"} Jan 20 16:52:54 crc kubenswrapper[4995]: I0120 16:52:54.064513 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 20 16:52:54 crc kubenswrapper[4995]: I0120 16:52:54.422289 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.218:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 16:52:54 crc kubenswrapper[4995]: I0120 16:52:54.422305 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" 
podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.218:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 16:52:55 crc kubenswrapper[4995]: I0120 16:52:55.050241 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerStarted","Data":"1cc62f0772e6894acf8efdb1ad7b260e02c108d9594a3beb6fb9c507ac8c5eec"} Jan 20 16:52:56 crc kubenswrapper[4995]: I0120 16:52:56.345372 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 20 16:52:57 crc kubenswrapper[4995]: I0120 16:52:57.565023 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 16:52:57 crc kubenswrapper[4995]: I0120 16:52:57.565696 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 16:52:57 crc kubenswrapper[4995]: I0120 16:52:57.571150 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 16:52:58 crc kubenswrapper[4995]: I0120 16:52:58.101841 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 16:53:00 crc kubenswrapper[4995]: I0120 16:53:00.136812 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerStarted","Data":"4cd27e363bcab137225463d2b3229845ce0d8b62cd8d21d8a9835d0125af8767"} Jan 20 16:53:00 crc kubenswrapper[4995]: I0120 16:53:00.172017 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.873292943 podStartE2EDuration="10.171997865s" podCreationTimestamp="2026-01-20 16:52:50 +0000 UTC" firstStartedPulling="2026-01-20 16:52:51.026155182 +0000 UTC m=+1289.270759988" lastFinishedPulling="2026-01-20 16:52:59.324860094 +0000 UTC m=+1297.569464910" observedRunningTime="2026-01-20 16:53:00.166448744 +0000 UTC m=+1298.411053540" watchObservedRunningTime="2026-01-20 16:53:00.171997865 +0000 UTC m=+1298.416602691" Jan 20 16:53:00 crc kubenswrapper[4995]: I0120 16:53:00.572001 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:53:00 crc kubenswrapper[4995]: I0120 16:53:00.572057 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:53:01 crc kubenswrapper[4995]: I0120 16:53:01.147663 4995 generic.go:334] "Generic (PLEG): container finished" podID="c72f2733-e3bf-4064-9a92-9e802fd8cf9e" containerID="ffcaba083debd4d1dc554202803fc5925daa05eb5fdbd7300f004f19ef4760b3" exitCode=137 Jan 20 16:53:01 crc kubenswrapper[4995]: I0120 16:53:01.147725 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"c72f2733-e3bf-4064-9a92-9e802fd8cf9e","Type":"ContainerDied","Data":"ffcaba083debd4d1dc554202803fc5925daa05eb5fdbd7300f004f19ef4760b3"} Jan 20 16:53:01 crc kubenswrapper[4995]: I0120 16:53:01.148415 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:53:01 crc kubenswrapper[4995]: I0120 16:53:01.969774 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.131835 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-combined-ca-bundle\") pod \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.132137 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvt9t\" (UniqueName: \"kubernetes.io/projected/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-kube-api-access-dvt9t\") pod \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.132167 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-config-data\") pod \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\" (UID: \"c72f2733-e3bf-4064-9a92-9e802fd8cf9e\") " Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.140955 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-kube-api-access-dvt9t" (OuterVolumeSpecName: "kube-api-access-dvt9t") pod "c72f2733-e3bf-4064-9a92-9e802fd8cf9e" (UID: "c72f2733-e3bf-4064-9a92-9e802fd8cf9e"). InnerVolumeSpecName "kube-api-access-dvt9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.158347 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c72f2733-e3bf-4064-9a92-9e802fd8cf9e" (UID: "c72f2733-e3bf-4064-9a92-9e802fd8cf9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.159539 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.159541 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"c72f2733-e3bf-4064-9a92-9e802fd8cf9e","Type":"ContainerDied","Data":"3c2bdfdf682696bac01eee5b672d85023ef5b211086b26094765598640c59adb"} Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.159875 4995 scope.go:117] "RemoveContainer" containerID="ffcaba083debd4d1dc554202803fc5925daa05eb5fdbd7300f004f19ef4760b3" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.163139 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-config-data" (OuterVolumeSpecName: "config-data") pod "c72f2733-e3bf-4064-9a92-9e802fd8cf9e" (UID: "c72f2733-e3bf-4064-9a92-9e802fd8cf9e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.234523 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvt9t\" (UniqueName: \"kubernetes.io/projected/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-kube-api-access-dvt9t\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.234559 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.234572 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c72f2733-e3bf-4064-9a92-9e802fd8cf9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.498161 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.508749 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.522403 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:53:02 crc kubenswrapper[4995]: E0120 16:53:02.522816 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c72f2733-e3bf-4064-9a92-9e802fd8cf9e" containerName="nova-cell1-novncproxy-novncproxy" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.522841 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c72f2733-e3bf-4064-9a92-9e802fd8cf9e" containerName="nova-cell1-novncproxy-novncproxy" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.523006 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c72f2733-e3bf-4064-9a92-9e802fd8cf9e" containerName="nova-cell1-novncproxy-novncproxy" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.523643 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.525771 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.526272 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.526305 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.541172 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.543535 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jd25r\" (UniqueName: \"kubernetes.io/projected/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-kube-api-access-jd25r\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.543692 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.543796 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.543829 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.543914 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.645403 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.645497 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.645519 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.645536 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.645603 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jd25r\" (UniqueName: \"kubernetes.io/projected/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-kube-api-access-jd25r\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.650379 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.658648 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.658890 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.666058 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.667437 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jd25r\" (UniqueName: \"kubernetes.io/projected/2a0df207-0ec6-420e-9f84-7ea1d4e6b469-kube-api-access-jd25r\") pod \"nova-cell1-novncproxy-0\" (UID: \"2a0df207-0ec6-420e-9f84-7ea1d4e6b469\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:02 crc kubenswrapper[4995]: I0120 16:53:02.842702 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:03 crc kubenswrapper[4995]: I0120 16:53:03.343028 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 16:53:03 crc kubenswrapper[4995]: I0120 16:53:03.344316 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 16:53:03 crc kubenswrapper[4995]: I0120 16:53:03.344718 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 16:53:03 crc kubenswrapper[4995]: I0120 16:53:03.347600 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 16:53:03 crc kubenswrapper[4995]: I0120 16:53:03.878398 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.011201 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c72f2733-e3bf-4064-9a92-9e802fd8cf9e" path="/var/lib/kubelet/pods/c72f2733-e3bf-4064-9a92-9e802fd8cf9e/volumes" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.194232 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2a0df207-0ec6-420e-9f84-7ea1d4e6b469","Type":"ContainerStarted","Data":"ef84261fdc5d17213f5806c1e03022ac08110bd802dc63cbfe4ddeffd6bcc3d1"} Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.194574 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.198951 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.215630 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.215614251 podStartE2EDuration="2.215614251s" podCreationTimestamp="2026-01-20 16:53:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:04.211130629 +0000 UTC m=+1302.455735435" watchObservedRunningTime="2026-01-20 16:53:04.215614251 +0000 UTC m=+1302.460219057" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.391714 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-tv7m9"] Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.394043 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.405913 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-tv7m9"] Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.481804 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.481906 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.481945 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-config\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.481984 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.482054 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rngs9\" (UniqueName: \"kubernetes.io/projected/d205e258-5f50-4d69-a33f-37bce8c1d479-kube-api-access-rngs9\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.482149 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.583551 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rngs9\" (UniqueName: \"kubernetes.io/projected/d205e258-5f50-4d69-a33f-37bce8c1d479-kube-api-access-rngs9\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.583631 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.583720 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.583770 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.583809 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-config\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.583858 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.585140 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.585151 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.585166 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.585174 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.585391 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-config\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.608238 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rngs9\" (UniqueName: 
\"kubernetes.io/projected/d205e258-5f50-4d69-a33f-37bce8c1d479-kube-api-access-rngs9\") pod \"dnsmasq-dns-89c5cd4d5-tv7m9\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:04 crc kubenswrapper[4995]: I0120 16:53:04.712546 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:05 crc kubenswrapper[4995]: I0120 16:53:05.176792 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-tv7m9"] Jan 20 16:53:05 crc kubenswrapper[4995]: W0120 16:53:05.179135 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd205e258_5f50_4d69_a33f_37bce8c1d479.slice/crio-b0e58c8867415f2a362db07310d090d6c42db4d673a4f2076ce9e3d330e3a27d WatchSource:0}: Error finding container b0e58c8867415f2a362db07310d090d6c42db4d673a4f2076ce9e3d330e3a27d: Status 404 returned error can't find the container with id b0e58c8867415f2a362db07310d090d6c42db4d673a4f2076ce9e3d330e3a27d Jan 20 16:53:05 crc kubenswrapper[4995]: I0120 16:53:05.205122 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" event={"ID":"d205e258-5f50-4d69-a33f-37bce8c1d479","Type":"ContainerStarted","Data":"b0e58c8867415f2a362db07310d090d6c42db4d673a4f2076ce9e3d330e3a27d"} Jan 20 16:53:05 crc kubenswrapper[4995]: I0120 16:53:05.207945 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2a0df207-0ec6-420e-9f84-7ea1d4e6b469","Type":"ContainerStarted","Data":"3ffb0dac926668e8849b835ddb9f75dcb1f9494728b733b2446e8921f5aaa8f5"} Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.218672 4995 generic.go:334] "Generic (PLEG): container finished" podID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerID="4828abc5495fb3b236ef08dadd2aeb646e583eb524db99208cf9b6b06a763ca6" exitCode=0 Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.218821 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" event={"ID":"d205e258-5f50-4d69-a33f-37bce8c1d479","Type":"ContainerDied","Data":"4828abc5495fb3b236ef08dadd2aeb646e583eb524db99208cf9b6b06a763ca6"} Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.481383 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.646809 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.647232 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-central-agent" containerID="cri-o://cf90988bf391997d6f8eeea8bafbc498206d17ad0cff37a439ffda51cc0995f8" gracePeriod=30 Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.647663 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="proxy-httpd" containerID="cri-o://4cd27e363bcab137225463d2b3229845ce0d8b62cd8d21d8a9835d0125af8767" gracePeriod=30 Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.647731 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="sg-core" 
containerID="cri-o://1cc62f0772e6894acf8efdb1ad7b260e02c108d9594a3beb6fb9c507ac8c5eec" gracePeriod=30 Jan 20 16:53:06 crc kubenswrapper[4995]: I0120 16:53:06.647773 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-notification-agent" containerID="cri-o://06c0cbea93f08b60f9573ceec4c838d385d07385eddd35d4a8f604e0f65a6797" gracePeriod=30 Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.228582 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" event={"ID":"d205e258-5f50-4d69-a33f-37bce8c1d479","Type":"ContainerStarted","Data":"ab3877b92388988d0b8ec4c03964c1280d4626ae669d98163d1720204a9e04c4"} Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.228962 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230420 4995 generic.go:334] "Generic (PLEG): container finished" podID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerID="4cd27e363bcab137225463d2b3229845ce0d8b62cd8d21d8a9835d0125af8767" exitCode=0 Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230453 4995 generic.go:334] "Generic (PLEG): container finished" podID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerID="1cc62f0772e6894acf8efdb1ad7b260e02c108d9594a3beb6fb9c507ac8c5eec" exitCode=2 Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230465 4995 generic.go:334] "Generic (PLEG): container finished" podID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerID="cf90988bf391997d6f8eeea8bafbc498206d17ad0cff37a439ffda51cc0995f8" exitCode=0 Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230534 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerDied","Data":"4cd27e363bcab137225463d2b3229845ce0d8b62cd8d21d8a9835d0125af8767"} Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230571 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerDied","Data":"1cc62f0772e6894acf8efdb1ad7b260e02c108d9594a3beb6fb9c507ac8c5eec"} Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230583 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerDied","Data":"cf90988bf391997d6f8eeea8bafbc498206d17ad0cff37a439ffda51cc0995f8"} Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230671 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-log" containerID="cri-o://500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935" gracePeriod=30 Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.230695 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-api" containerID="cri-o://b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e" gracePeriod=30 Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.287633 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" podStartSLOduration=3.287614945 podStartE2EDuration="3.287614945s" 
podCreationTimestamp="2026-01-20 16:53:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:07.285847097 +0000 UTC m=+1305.530451893" watchObservedRunningTime="2026-01-20 16:53:07.287614945 +0000 UTC m=+1305.532219761" Jan 20 16:53:07 crc kubenswrapper[4995]: I0120 16:53:07.842896 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:08 crc kubenswrapper[4995]: I0120 16:53:08.239610 4995 generic.go:334] "Generic (PLEG): container finished" podID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerID="500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935" exitCode=143 Jan 20 16:53:08 crc kubenswrapper[4995]: I0120 16:53:08.239723 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82c2fe85-b2a1-49b9-88d3-811aefa72dab","Type":"ContainerDied","Data":"500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935"} Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.278103 4995 generic.go:334] "Generic (PLEG): container finished" podID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerID="06c0cbea93f08b60f9573ceec4c838d385d07385eddd35d4a8f604e0f65a6797" exitCode=0 Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.278132 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerDied","Data":"06c0cbea93f08b60f9573ceec4c838d385d07385eddd35d4a8f604e0f65a6797"} Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.456304 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594014 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-scripts\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594130 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-run-httpd\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594202 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dj9d5\" (UniqueName: \"kubernetes.io/projected/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-kube-api-access-dj9d5\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594260 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-log-httpd\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594294 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-sg-core-conf-yaml\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc 
kubenswrapper[4995]: I0120 16:53:09.594326 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-config-data\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594806 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.594866 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.595314 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-combined-ca-bundle\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.595356 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-ceilometer-tls-certs\") pod \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\" (UID: \"e9fd1e30-87d4-40e7-8ef7-c08e593997c8\") " Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.595933 4995 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.595963 4995 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.599529 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-scripts" (OuterVolumeSpecName: "scripts") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.604258 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-kube-api-access-dj9d5" (OuterVolumeSpecName: "kube-api-access-dj9d5") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "kube-api-access-dj9d5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.628927 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.650300 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.670572 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.697467 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dj9d5\" (UniqueName: \"kubernetes.io/projected/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-kube-api-access-dj9d5\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.697496 4995 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.697505 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.697515 4995 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.697523 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.723850 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-config-data" (OuterVolumeSpecName: "config-data") pod "e9fd1e30-87d4-40e7-8ef7-c08e593997c8" (UID: "e9fd1e30-87d4-40e7-8ef7-c08e593997c8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:09 crc kubenswrapper[4995]: I0120 16:53:09.799673 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9fd1e30-87d4-40e7-8ef7-c08e593997c8-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.289671 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e9fd1e30-87d4-40e7-8ef7-c08e593997c8","Type":"ContainerDied","Data":"82cc3af88bbe239f91fd4ea9473cb6de81a89697ba772ac51d712aebc7a2dc4c"} Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.289746 4995 scope.go:117] "RemoveContainer" containerID="4cd27e363bcab137225463d2b3229845ce0d8b62cd8d21d8a9835d0125af8767" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.289802 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.323030 4995 scope.go:117] "RemoveContainer" containerID="1cc62f0772e6894acf8efdb1ad7b260e02c108d9594a3beb6fb9c507ac8c5eec" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.355345 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.358578 4995 scope.go:117] "RemoveContainer" containerID="06c0cbea93f08b60f9573ceec4c838d385d07385eddd35d4a8f604e0f65a6797" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.361577 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.372729 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:53:10 crc kubenswrapper[4995]: E0120 16:53:10.373419 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="sg-core" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.373445 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="sg-core" Jan 20 16:53:10 crc kubenswrapper[4995]: E0120 16:53:10.373465 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-central-agent" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.373476 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-central-agent" Jan 20 16:53:10 crc kubenswrapper[4995]: E0120 16:53:10.373489 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-notification-agent" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.373497 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-notification-agent" Jan 20 16:53:10 crc kubenswrapper[4995]: E0120 16:53:10.373509 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="proxy-httpd" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.373517 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="proxy-httpd" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.373847 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" 
containerName="proxy-httpd" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.373981 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-notification-agent" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.374152 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="sg-core" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.374370 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" containerName="ceilometer-central-agent" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.380437 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.386463 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.386700 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.386470 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.386470 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.396290 4995 scope.go:117] "RemoveContainer" containerID="cf90988bf391997d6f8eeea8bafbc498206d17ad0cff37a439ffda51cc0995f8" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519025 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c5gw\" (UniqueName: \"kubernetes.io/projected/31630944-4dd8-4460-b8b3-d87157e2a0ef-kube-api-access-5c5gw\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519094 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-config-data\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519113 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519133 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-scripts\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519157 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/31630944-4dd8-4460-b8b3-d87157e2a0ef-run-httpd\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: 
I0120 16:53:10.519314 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519617 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/31630944-4dd8-4460-b8b3-d87157e2a0ef-log-httpd\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.519659 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621331 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/31630944-4dd8-4460-b8b3-d87157e2a0ef-log-httpd\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621370 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621432 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c5gw\" (UniqueName: \"kubernetes.io/projected/31630944-4dd8-4460-b8b3-d87157e2a0ef-kube-api-access-5c5gw\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621458 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-config-data\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621471 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621493 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-scripts\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621517 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/31630944-4dd8-4460-b8b3-d87157e2a0ef-run-httpd\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " 
pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621561 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.621822 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/31630944-4dd8-4460-b8b3-d87157e2a0ef-log-httpd\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.622235 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/31630944-4dd8-4460-b8b3-d87157e2a0ef-run-httpd\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.626261 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.632944 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-scripts\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.634822 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-config-data\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.636114 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.647927 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/31630944-4dd8-4460-b8b3-d87157e2a0ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.654901 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c5gw\" (UniqueName: \"kubernetes.io/projected/31630944-4dd8-4460-b8b3-d87157e2a0ef-kube-api-access-5c5gw\") pod \"ceilometer-0\" (UID: \"31630944-4dd8-4460-b8b3-d87157e2a0ef\") " pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.699931 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 16:53:10 crc kubenswrapper[4995]: I0120 16:53:10.875513 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.029307 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6nc8\" (UniqueName: \"kubernetes.io/projected/82c2fe85-b2a1-49b9-88d3-811aefa72dab-kube-api-access-j6nc8\") pod \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.029510 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82c2fe85-b2a1-49b9-88d3-811aefa72dab-logs\") pod \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.029545 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-combined-ca-bundle\") pod \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.029639 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-config-data\") pod \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\" (UID: \"82c2fe85-b2a1-49b9-88d3-811aefa72dab\") " Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.030603 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82c2fe85-b2a1-49b9-88d3-811aefa72dab-logs" (OuterVolumeSpecName: "logs") pod "82c2fe85-b2a1-49b9-88d3-811aefa72dab" (UID: "82c2fe85-b2a1-49b9-88d3-811aefa72dab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.034759 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82c2fe85-b2a1-49b9-88d3-811aefa72dab-kube-api-access-j6nc8" (OuterVolumeSpecName: "kube-api-access-j6nc8") pod "82c2fe85-b2a1-49b9-88d3-811aefa72dab" (UID: "82c2fe85-b2a1-49b9-88d3-811aefa72dab"). InnerVolumeSpecName "kube-api-access-j6nc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.057382 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82c2fe85-b2a1-49b9-88d3-811aefa72dab" (UID: "82c2fe85-b2a1-49b9-88d3-811aefa72dab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.067975 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-config-data" (OuterVolumeSpecName: "config-data") pod "82c2fe85-b2a1-49b9-88d3-811aefa72dab" (UID: "82c2fe85-b2a1-49b9-88d3-811aefa72dab"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.131854 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82c2fe85-b2a1-49b9-88d3-811aefa72dab-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.131898 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.131913 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82c2fe85-b2a1-49b9-88d3-811aefa72dab-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.131927 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6nc8\" (UniqueName: \"kubernetes.io/projected/82c2fe85-b2a1-49b9-88d3-811aefa72dab-kube-api-access-j6nc8\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.229899 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 16:53:11 crc kubenswrapper[4995]: W0120 16:53:11.236808 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31630944_4dd8_4460_b8b3_d87157e2a0ef.slice/crio-33e1a2c932881ce6213ab29d7f27d5aafb973bdd66fe8f3579ad046e6d134ac9 WatchSource:0}: Error finding container 33e1a2c932881ce6213ab29d7f27d5aafb973bdd66fe8f3579ad046e6d134ac9: Status 404 returned error can't find the container with id 33e1a2c932881ce6213ab29d7f27d5aafb973bdd66fe8f3579ad046e6d134ac9 Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.304355 4995 generic.go:334] "Generic (PLEG): container finished" podID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerID="b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e" exitCode=0 Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.304637 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.304636 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82c2fe85-b2a1-49b9-88d3-811aefa72dab","Type":"ContainerDied","Data":"b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e"} Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.304807 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"82c2fe85-b2a1-49b9-88d3-811aefa72dab","Type":"ContainerDied","Data":"3d8411c72a5bed73fec7688703d56d1e06e402a31925b2574ff48be689e18a0c"} Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.304833 4995 scope.go:117] "RemoveContainer" containerID="b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.306353 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"31630944-4dd8-4460-b8b3-d87157e2a0ef","Type":"ContainerStarted","Data":"33e1a2c932881ce6213ab29d7f27d5aafb973bdd66fe8f3579ad046e6d134ac9"} Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.327637 4995 scope.go:117] "RemoveContainer" containerID="500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.351304 4995 scope.go:117] "RemoveContainer" containerID="b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e" Jan 20 16:53:11 crc kubenswrapper[4995]: E0120 16:53:11.351789 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e\": container with ID starting with b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e not found: ID does not exist" containerID="b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.351812 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e"} err="failed to get container status \"b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e\": rpc error: code = NotFound desc = could not find container \"b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e\": container with ID starting with b42a6f46e5b5dd70df70fa92d53f6570b3b8b5452474926ed4b0197f8611f11e not found: ID does not exist" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.351842 4995 scope.go:117] "RemoveContainer" containerID="500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935" Jan 20 16:53:11 crc kubenswrapper[4995]: E0120 16:53:11.352153 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935\": container with ID starting with 500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935 not found: ID does not exist" containerID="500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.352195 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935"} err="failed to get container status \"500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935\": rpc error: code = NotFound desc = could 
not find container \"500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935\": container with ID starting with 500dd39aefc1d0719c90f1d137d7e30ca17cfe937440c87cd1269ed8670fd935 not found: ID does not exist" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.358563 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.375976 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.385495 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:11 crc kubenswrapper[4995]: E0120 16:53:11.385902 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-api" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.385920 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-api" Jan 20 16:53:11 crc kubenswrapper[4995]: E0120 16:53:11.385952 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-log" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.385960 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-log" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.386167 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-log" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.386192 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" containerName="nova-api-api" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.387201 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.395089 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.395221 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.395239 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.416754 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.538367 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.538679 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s55q7\" (UniqueName: \"kubernetes.io/projected/1b0570e1-6ac2-41ce-8905-e59a54e06a31-kube-api-access-s55q7\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.538767 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b0570e1-6ac2-41ce-8905-e59a54e06a31-logs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.538819 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-public-tls-certs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.538906 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.538934 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-config-data\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.640683 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.640733 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s55q7\" (UniqueName: \"kubernetes.io/projected/1b0570e1-6ac2-41ce-8905-e59a54e06a31-kube-api-access-s55q7\") 
pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.640814 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b0570e1-6ac2-41ce-8905-e59a54e06a31-logs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.640858 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-public-tls-certs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.640941 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.640969 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-config-data\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.641746 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b0570e1-6ac2-41ce-8905-e59a54e06a31-logs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.644900 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.645407 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-internal-tls-certs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.645535 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-public-tls-certs\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.645969 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-config-data\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.660284 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s55q7\" (UniqueName: \"kubernetes.io/projected/1b0570e1-6ac2-41ce-8905-e59a54e06a31-kube-api-access-s55q7\") pod \"nova-api-0\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " pod="openstack/nova-api-0" Jan 
20 16:53:11 crc kubenswrapper[4995]: I0120 16:53:11.711660 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:12 crc kubenswrapper[4995]: I0120 16:53:11.999830 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82c2fe85-b2a1-49b9-88d3-811aefa72dab" path="/var/lib/kubelet/pods/82c2fe85-b2a1-49b9-88d3-811aefa72dab/volumes" Jan 20 16:53:12 crc kubenswrapper[4995]: I0120 16:53:12.001303 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9fd1e30-87d4-40e7-8ef7-c08e593997c8" path="/var/lib/kubelet/pods/e9fd1e30-87d4-40e7-8ef7-c08e593997c8/volumes" Jan 20 16:53:12 crc kubenswrapper[4995]: I0120 16:53:12.166447 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:12 crc kubenswrapper[4995]: W0120 16:53:12.169230 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b0570e1_6ac2_41ce_8905_e59a54e06a31.slice/crio-e8a273f5902eae7b3fa470cf31cf750c3181d52fd3d422a6bc78d9a303b71f1d WatchSource:0}: Error finding container e8a273f5902eae7b3fa470cf31cf750c3181d52fd3d422a6bc78d9a303b71f1d: Status 404 returned error can't find the container with id e8a273f5902eae7b3fa470cf31cf750c3181d52fd3d422a6bc78d9a303b71f1d Jan 20 16:53:12 crc kubenswrapper[4995]: I0120 16:53:12.319701 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b0570e1-6ac2-41ce-8905-e59a54e06a31","Type":"ContainerStarted","Data":"e8a273f5902eae7b3fa470cf31cf750c3181d52fd3d422a6bc78d9a303b71f1d"} Jan 20 16:53:12 crc kubenswrapper[4995]: I0120 16:53:12.842825 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:12 crc kubenswrapper[4995]: I0120 16:53:12.867668 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.335499 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"31630944-4dd8-4460-b8b3-d87157e2a0ef","Type":"ContainerStarted","Data":"662265bd78cd9c55a86e4840ecb59f697996f9e539dd80d6355fc17425e5e2cd"} Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.336353 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"31630944-4dd8-4460-b8b3-d87157e2a0ef","Type":"ContainerStarted","Data":"d5a2a43401222a24b52065e0d890d7f925ae39e94cb3dd4baea07184f4f5553d"} Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.339241 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b0570e1-6ac2-41ce-8905-e59a54e06a31","Type":"ContainerStarted","Data":"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773"} Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.339298 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b0570e1-6ac2-41ce-8905-e59a54e06a31","Type":"ContainerStarted","Data":"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a"} Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.362186 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.362164462 podStartE2EDuration="2.362164462s" podCreationTimestamp="2026-01-20 16:53:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:13.35728338 +0000 UTC m=+1311.601888186" watchObservedRunningTime="2026-01-20 16:53:13.362164462 +0000 UTC m=+1311.606769278" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.364909 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.517291 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-gp9bb"] Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.518501 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.521557 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.521817 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.534986 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gp9bb"] Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.681946 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-scripts\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.681987 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-config-data\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.682060 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.682227 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm8cq\" (UniqueName: \"kubernetes.io/projected/3f5ec115-81c7-46cc-bf53-b48157c83915-kube-api-access-tm8cq\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.784394 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.784476 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm8cq\" (UniqueName: \"kubernetes.io/projected/3f5ec115-81c7-46cc-bf53-b48157c83915-kube-api-access-tm8cq\") pod 
\"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.784589 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-scripts\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.784613 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-config-data\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.790152 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-scripts\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.790347 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.799547 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-config-data\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.803829 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm8cq\" (UniqueName: \"kubernetes.io/projected/3f5ec115-81c7-46cc-bf53-b48157c83915-kube-api-access-tm8cq\") pod \"nova-cell1-cell-mapping-gp9bb\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:13 crc kubenswrapper[4995]: I0120 16:53:13.848924 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:14 crc kubenswrapper[4995]: I0120 16:53:14.320351 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gp9bb"] Jan 20 16:53:14 crc kubenswrapper[4995]: W0120 16:53:14.326215 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f5ec115_81c7_46cc_bf53_b48157c83915.slice/crio-6032bef3fd8a053bdf6231388e193cb134b742c0e2833764532891b73eb36178 WatchSource:0}: Error finding container 6032bef3fd8a053bdf6231388e193cb134b742c0e2833764532891b73eb36178: Status 404 returned error can't find the container with id 6032bef3fd8a053bdf6231388e193cb134b742c0e2833764532891b73eb36178 Jan 20 16:53:14 crc kubenswrapper[4995]: I0120 16:53:14.355056 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gp9bb" event={"ID":"3f5ec115-81c7-46cc-bf53-b48157c83915","Type":"ContainerStarted","Data":"6032bef3fd8a053bdf6231388e193cb134b742c0e2833764532891b73eb36178"} Jan 20 16:53:14 crc kubenswrapper[4995]: I0120 16:53:14.357943 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"31630944-4dd8-4460-b8b3-d87157e2a0ef","Type":"ContainerStarted","Data":"1888c55485701ded6571f40ae52a7659939921d5c49a52ed7b0fbf4711adebcf"} Jan 20 16:53:14 crc kubenswrapper[4995]: I0120 16:53:14.714242 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:53:14 crc kubenswrapper[4995]: I0120 16:53:14.809658 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2qxp5"] Jan 20 16:53:14 crc kubenswrapper[4995]: I0120 16:53:14.809879 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="dnsmasq-dns" containerID="cri-o://297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603" gracePeriod=10 Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.287718 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.390581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gp9bb" event={"ID":"3f5ec115-81c7-46cc-bf53-b48157c83915","Type":"ContainerStarted","Data":"3aba7627c4bf070ffa879b48848eb4db2271378f8655845f107719e00f1345be"} Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.394047 4995 generic.go:334] "Generic (PLEG): container finished" podID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerID="297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603" exitCode=0 Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.394165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" event={"ID":"7df7445f-fe61-4af9-9f97-6edeab0ab979","Type":"ContainerDied","Data":"297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603"} Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.394211 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" event={"ID":"7df7445f-fe61-4af9-9f97-6edeab0ab979","Type":"ContainerDied","Data":"b99addc256e72c4236da4a7b17bf7701e0313a351f679b0e2305b707233021d8"} Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.394255 4995 scope.go:117] "RemoveContainer" containerID="297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.394267 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.414921 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-gp9bb" podStartSLOduration=2.414900164 podStartE2EDuration="2.414900164s" podCreationTimestamp="2026-01-20 16:53:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:15.410446562 +0000 UTC m=+1313.655051368" watchObservedRunningTime="2026-01-20 16:53:15.414900164 +0000 UTC m=+1313.659504970" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.419442 4995 scope.go:117] "RemoveContainer" containerID="e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.427013 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-swift-storage-0\") pod \"7df7445f-fe61-4af9-9f97-6edeab0ab979\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.427104 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6gp72\" (UniqueName: \"kubernetes.io/projected/7df7445f-fe61-4af9-9f97-6edeab0ab979-kube-api-access-6gp72\") pod \"7df7445f-fe61-4af9-9f97-6edeab0ab979\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.427165 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-sb\") pod \"7df7445f-fe61-4af9-9f97-6edeab0ab979\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.427198 4995 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-config\") pod \"7df7445f-fe61-4af9-9f97-6edeab0ab979\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.427342 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-nb\") pod \"7df7445f-fe61-4af9-9f97-6edeab0ab979\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.427444 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-svc\") pod \"7df7445f-fe61-4af9-9f97-6edeab0ab979\" (UID: \"7df7445f-fe61-4af9-9f97-6edeab0ab979\") " Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.456226 4995 scope.go:117] "RemoveContainer" containerID="297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.458751 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7df7445f-fe61-4af9-9f97-6edeab0ab979-kube-api-access-6gp72" (OuterVolumeSpecName: "kube-api-access-6gp72") pod "7df7445f-fe61-4af9-9f97-6edeab0ab979" (UID: "7df7445f-fe61-4af9-9f97-6edeab0ab979"). InnerVolumeSpecName "kube-api-access-6gp72". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:15 crc kubenswrapper[4995]: E0120 16:53:15.459490 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603\": container with ID starting with 297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603 not found: ID does not exist" containerID="297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.459540 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603"} err="failed to get container status \"297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603\": rpc error: code = NotFound desc = could not find container \"297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603\": container with ID starting with 297ae4dc5ee30b833794d1905a3a78cb46360695a9a609affa053388bc502603 not found: ID does not exist" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.459574 4995 scope.go:117] "RemoveContainer" containerID="e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196" Jan 20 16:53:15 crc kubenswrapper[4995]: E0120 16:53:15.460011 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196\": container with ID starting with e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196 not found: ID does not exist" containerID="e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.460043 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196"} err="failed to 
get container status \"e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196\": rpc error: code = NotFound desc = could not find container \"e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196\": container with ID starting with e891a451003f1475036eee5211df2d0e491574ad35a3f12ca881de5ea8a89196 not found: ID does not exist" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.487029 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7df7445f-fe61-4af9-9f97-6edeab0ab979" (UID: "7df7445f-fe61-4af9-9f97-6edeab0ab979"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.487858 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7df7445f-fe61-4af9-9f97-6edeab0ab979" (UID: "7df7445f-fe61-4af9-9f97-6edeab0ab979"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.495271 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-config" (OuterVolumeSpecName: "config") pod "7df7445f-fe61-4af9-9f97-6edeab0ab979" (UID: "7df7445f-fe61-4af9-9f97-6edeab0ab979"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.501300 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7df7445f-fe61-4af9-9f97-6edeab0ab979" (UID: "7df7445f-fe61-4af9-9f97-6edeab0ab979"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.502260 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7df7445f-fe61-4af9-9f97-6edeab0ab979" (UID: "7df7445f-fe61-4af9-9f97-6edeab0ab979"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.531688 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6gp72\" (UniqueName: \"kubernetes.io/projected/7df7445f-fe61-4af9-9f97-6edeab0ab979-kube-api-access-6gp72\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.531723 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.531737 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.531748 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.531759 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.531769 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7df7445f-fe61-4af9-9f97-6edeab0ab979-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.747855 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2qxp5"] Jan 20 16:53:15 crc kubenswrapper[4995]: I0120 16:53:15.756426 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-2qxp5"] Jan 20 16:53:16 crc kubenswrapper[4995]: I0120 16:53:16.010151 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" path="/var/lib/kubelet/pods/7df7445f-fe61-4af9-9f97-6edeab0ab979/volumes" Jan 20 16:53:16 crc kubenswrapper[4995]: I0120 16:53:16.423171 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"31630944-4dd8-4460-b8b3-d87157e2a0ef","Type":"ContainerStarted","Data":"b4ae164d8652ecbb21ac2dc90b3a359e712323ae31679e123e18f0daad9ec0eb"} Jan 20 16:53:16 crc kubenswrapper[4995]: I0120 16:53:16.424378 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 16:53:16 crc kubenswrapper[4995]: I0120 16:53:16.446152 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.063948522 podStartE2EDuration="6.446134261s" podCreationTimestamp="2026-01-20 16:53:10 +0000 UTC" firstStartedPulling="2026-01-20 16:53:11.241928012 +0000 UTC m=+1309.486532818" lastFinishedPulling="2026-01-20 16:53:15.624113751 +0000 UTC m=+1313.868718557" observedRunningTime="2026-01-20 16:53:16.442167624 +0000 UTC m=+1314.686772430" watchObservedRunningTime="2026-01-20 16:53:16.446134261 +0000 UTC m=+1314.690739067" Jan 20 16:53:19 crc kubenswrapper[4995]: I0120 16:53:19.464455 4995 generic.go:334] "Generic (PLEG): container finished" podID="3f5ec115-81c7-46cc-bf53-b48157c83915" containerID="3aba7627c4bf070ffa879b48848eb4db2271378f8655845f107719e00f1345be" 
exitCode=0 Jan 20 16:53:19 crc kubenswrapper[4995]: I0120 16:53:19.464679 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gp9bb" event={"ID":"3f5ec115-81c7-46cc-bf53-b48157c83915","Type":"ContainerDied","Data":"3aba7627c4bf070ffa879b48848eb4db2271378f8655845f107719e00f1345be"} Jan 20 16:53:20 crc kubenswrapper[4995]: I0120 16:53:20.180277 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-757b4f8459-2qxp5" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.212:5353: i/o timeout" Jan 20 16:53:20 crc kubenswrapper[4995]: I0120 16:53:20.892606 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.044297 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-config-data\") pod \"3f5ec115-81c7-46cc-bf53-b48157c83915\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.044788 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-scripts\") pod \"3f5ec115-81c7-46cc-bf53-b48157c83915\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.044834 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm8cq\" (UniqueName: \"kubernetes.io/projected/3f5ec115-81c7-46cc-bf53-b48157c83915-kube-api-access-tm8cq\") pod \"3f5ec115-81c7-46cc-bf53-b48157c83915\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.044879 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-combined-ca-bundle\") pod \"3f5ec115-81c7-46cc-bf53-b48157c83915\" (UID: \"3f5ec115-81c7-46cc-bf53-b48157c83915\") " Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.051923 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f5ec115-81c7-46cc-bf53-b48157c83915-kube-api-access-tm8cq" (OuterVolumeSpecName: "kube-api-access-tm8cq") pod "3f5ec115-81c7-46cc-bf53-b48157c83915" (UID: "3f5ec115-81c7-46cc-bf53-b48157c83915"). InnerVolumeSpecName "kube-api-access-tm8cq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.072329 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-scripts" (OuterVolumeSpecName: "scripts") pod "3f5ec115-81c7-46cc-bf53-b48157c83915" (UID: "3f5ec115-81c7-46cc-bf53-b48157c83915"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.082550 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f5ec115-81c7-46cc-bf53-b48157c83915" (UID: "3f5ec115-81c7-46cc-bf53-b48157c83915"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.088667 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-config-data" (OuterVolumeSpecName: "config-data") pod "3f5ec115-81c7-46cc-bf53-b48157c83915" (UID: "3f5ec115-81c7-46cc-bf53-b48157c83915"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.147152 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.147189 4995 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.147200 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm8cq\" (UniqueName: \"kubernetes.io/projected/3f5ec115-81c7-46cc-bf53-b48157c83915-kube-api-access-tm8cq\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.147210 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f5ec115-81c7-46cc-bf53-b48157c83915-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.487833 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gp9bb" event={"ID":"3f5ec115-81c7-46cc-bf53-b48157c83915","Type":"ContainerDied","Data":"6032bef3fd8a053bdf6231388e193cb134b742c0e2833764532891b73eb36178"} Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.487877 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6032bef3fd8a053bdf6231388e193cb134b742c0e2833764532891b73eb36178" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.487950 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gp9bb" Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.674920 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.675267 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="41eb2c3c-0030-4feb-a046-26b558595460" containerName="nova-scheduler-scheduler" containerID="cri-o://28c557cb7e66ee02aab7ab9c869fcd4add944ca116d13d270a33a33b92c43c9f" gracePeriod=30 Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.690542 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.691028 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-log" containerID="cri-o://5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a" gracePeriod=30 Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.691142 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-api" containerID="cri-o://9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773" gracePeriod=30 Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.713651 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.713882 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-metadata" containerID="cri-o://c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914" gracePeriod=30 Jan 20 16:53:21 crc kubenswrapper[4995]: I0120 16:53:21.714020 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-log" containerID="cri-o://2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0" gracePeriod=30 Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.357428 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.473500 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-public-tls-certs\") pod \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.473629 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-config-data\") pod \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.474346 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-combined-ca-bundle\") pod \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.474390 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s55q7\" (UniqueName: \"kubernetes.io/projected/1b0570e1-6ac2-41ce-8905-e59a54e06a31-kube-api-access-s55q7\") pod \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.474492 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-internal-tls-certs\") pod \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.474593 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b0570e1-6ac2-41ce-8905-e59a54e06a31-logs\") pod \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\" (UID: \"1b0570e1-6ac2-41ce-8905-e59a54e06a31\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.475588 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b0570e1-6ac2-41ce-8905-e59a54e06a31-logs" (OuterVolumeSpecName: "logs") pod "1b0570e1-6ac2-41ce-8905-e59a54e06a31" (UID: "1b0570e1-6ac2-41ce-8905-e59a54e06a31"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.495201 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b0570e1-6ac2-41ce-8905-e59a54e06a31-kube-api-access-s55q7" (OuterVolumeSpecName: "kube-api-access-s55q7") pod "1b0570e1-6ac2-41ce-8905-e59a54e06a31" (UID: "1b0570e1-6ac2-41ce-8905-e59a54e06a31"). InnerVolumeSpecName "kube-api-access-s55q7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.512704 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-config-data" (OuterVolumeSpecName: "config-data") pod "1b0570e1-6ac2-41ce-8905-e59a54e06a31" (UID: "1b0570e1-6ac2-41ce-8905-e59a54e06a31"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514489 4995 generic.go:334] "Generic (PLEG): container finished" podID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerID="9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773" exitCode=0 Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514524 4995 generic.go:334] "Generic (PLEG): container finished" podID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerID="5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a" exitCode=143 Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514575 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b0570e1-6ac2-41ce-8905-e59a54e06a31","Type":"ContainerDied","Data":"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773"} Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514608 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b0570e1-6ac2-41ce-8905-e59a54e06a31","Type":"ContainerDied","Data":"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a"} Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514620 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1b0570e1-6ac2-41ce-8905-e59a54e06a31","Type":"ContainerDied","Data":"e8a273f5902eae7b3fa470cf31cf750c3181d52fd3d422a6bc78d9a303b71f1d"} Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514639 4995 scope.go:117] "RemoveContainer" containerID="9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.514772 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.524713 4995 generic.go:334] "Generic (PLEG): container finished" podID="41eb2c3c-0030-4feb-a046-26b558595460" containerID="28c557cb7e66ee02aab7ab9c869fcd4add944ca116d13d270a33a33b92c43c9f" exitCode=0 Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.524803 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41eb2c3c-0030-4feb-a046-26b558595460","Type":"ContainerDied","Data":"28c557cb7e66ee02aab7ab9c869fcd4add944ca116d13d270a33a33b92c43c9f"} Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.527436 4995 generic.go:334] "Generic (PLEG): container finished" podID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerID="2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0" exitCode=143 Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.527462 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"879d57fe-7d15-4279-b4b2-cd5f16db5ac9","Type":"ContainerDied","Data":"2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0"} Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.539187 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1b0570e1-6ac2-41ce-8905-e59a54e06a31" (UID: "1b0570e1-6ac2-41ce-8905-e59a54e06a31"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.543716 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b0570e1-6ac2-41ce-8905-e59a54e06a31" (UID: "1b0570e1-6ac2-41ce-8905-e59a54e06a31"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.561242 4995 scope.go:117] "RemoveContainer" containerID="5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.566715 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1b0570e1-6ac2-41ce-8905-e59a54e06a31" (UID: "1b0570e1-6ac2-41ce-8905-e59a54e06a31"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.578827 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.578866 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s55q7\" (UniqueName: \"kubernetes.io/projected/1b0570e1-6ac2-41ce-8905-e59a54e06a31-kube-api-access-s55q7\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.578881 4995 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.578895 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1b0570e1-6ac2-41ce-8905-e59a54e06a31-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.578907 4995 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.578917 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b0570e1-6ac2-41ce-8905-e59a54e06a31-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.584071 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.589884 4995 scope.go:117] "RemoveContainer" containerID="9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.590263 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773\": container with ID starting with 9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773 not found: ID does not exist" containerID="9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.590299 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773"} err="failed to get container status \"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773\": rpc error: code = NotFound desc = could not find container \"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773\": container with ID starting with 9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773 not found: ID does not exist" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.590324 4995 scope.go:117] "RemoveContainer" containerID="5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.591176 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a\": container with ID starting with 5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a not found: ID does not exist" containerID="5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.591209 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a"} err="failed to get container status \"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a\": rpc error: code = NotFound desc = could not find container \"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a\": container with ID starting with 5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a not found: ID does not exist" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.591229 4995 scope.go:117] "RemoveContainer" containerID="9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.591511 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773"} err="failed to get container status \"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773\": rpc error: code = NotFound desc = could not find container \"9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773\": container with ID starting with 9071b4371b177571583b12b2068e8eb247e48321f296404f34ff00e3cacdf773 not found: ID does not exist" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.591549 4995 scope.go:117] "RemoveContainer" containerID="5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.591798 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a"} err="failed to get container status \"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a\": rpc error: code = NotFound desc = could not find container \"5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a\": container with ID starting with 5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a not found: ID does not exist" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.680394 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm9p6\" (UniqueName: \"kubernetes.io/projected/41eb2c3c-0030-4feb-a046-26b558595460-kube-api-access-nm9p6\") pod \"41eb2c3c-0030-4feb-a046-26b558595460\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.680985 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-config-data\") pod \"41eb2c3c-0030-4feb-a046-26b558595460\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.681448 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-combined-ca-bundle\") pod \"41eb2c3c-0030-4feb-a046-26b558595460\" (UID: \"41eb2c3c-0030-4feb-a046-26b558595460\") " Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.683912 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41eb2c3c-0030-4feb-a046-26b558595460-kube-api-access-nm9p6" (OuterVolumeSpecName: "kube-api-access-nm9p6") pod "41eb2c3c-0030-4feb-a046-26b558595460" (UID: "41eb2c3c-0030-4feb-a046-26b558595460"). InnerVolumeSpecName "kube-api-access-nm9p6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.720682 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41eb2c3c-0030-4feb-a046-26b558595460" (UID: "41eb2c3c-0030-4feb-a046-26b558595460"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.727464 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-config-data" (OuterVolumeSpecName: "config-data") pod "41eb2c3c-0030-4feb-a046-26b558595460" (UID: "41eb2c3c-0030-4feb-a046-26b558595460"). InnerVolumeSpecName "config-data". 
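
The NotFound errors around here are benign: the kubelet retries ContainerStatus/RemoveContainer for IDs the runtime has already deleted, logs "ID does not exist", and moves on, because the desired end state (container gone) already holds. A sketch of that idempotent-delete pattern using the gRPC status codes visible in the "rpc error: code = NotFound" messages; the remove callback stands in for a CRI RemoveContainer RPC and all names are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent treats NotFound from the runtime as success: during
// cleanup, "container does not exist" is exactly the state we want.
func removeIfPresent(remove func(id string) error, id string) error {
	if err := remove(id); status.Code(err) == codes.NotFound {
		fmt.Printf("container %s already gone, nothing to do\n", id[:12])
		return nil
	} else if err != nil {
		return err
	}
	return nil
}

func main() {
	// Simulated runtime that no longer knows the container, mirroring the
	// "could not find container" responses in the log.
	gone := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	if err := removeIfPresent(gone, "5f9f1711dcca616834982cadb77ccd11ac59e7a399ade268e4bb8234b8e4c48a"); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}
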
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.785057 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.785116 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm9p6\" (UniqueName: \"kubernetes.io/projected/41eb2c3c-0030-4feb-a046-26b558595460-kube-api-access-nm9p6\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.785132 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41eb2c3c-0030-4feb-a046-26b558595460-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.888868 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.897873 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.917520 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.918129 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="init" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918162 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="init" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.918187 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f5ec115-81c7-46cc-bf53-b48157c83915" containerName="nova-manage" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918197 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f5ec115-81c7-46cc-bf53-b48157c83915" containerName="nova-manage" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.918215 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-api" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918225 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-api" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.918245 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="dnsmasq-dns" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918255 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="dnsmasq-dns" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.918269 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-log" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918280 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-log" Jan 20 16:53:22 crc kubenswrapper[4995]: E0120 16:53:22.918301 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41eb2c3c-0030-4feb-a046-26b558595460" containerName="nova-scheduler-scheduler" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918314 4995 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="41eb2c3c-0030-4feb-a046-26b558595460" containerName="nova-scheduler-scheduler" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918616 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f5ec115-81c7-46cc-bf53-b48157c83915" containerName="nova-manage" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918652 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-log" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918669 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" containerName="nova-api-api" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918688 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="7df7445f-fe61-4af9-9f97-6edeab0ab979" containerName="dnsmasq-dns" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.918705 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="41eb2c3c-0030-4feb-a046-26b558595460" containerName="nova-scheduler-scheduler" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.919961 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.921872 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.922054 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.922111 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 20 16:53:22 crc kubenswrapper[4995]: I0120 16:53:22.945311 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.091734 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-public-tls-certs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.091810 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-config-data\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.093346 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.093785 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrsk8\" (UniqueName: \"kubernetes.io/projected/d6dd58d8-2dec-4e9a-b9b7-78f585378448-kube-api-access-hrsk8\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.093903 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6dd58d8-2dec-4e9a-b9b7-78f585378448-logs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.094067 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196176 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196248 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrsk8\" (UniqueName: \"kubernetes.io/projected/d6dd58d8-2dec-4e9a-b9b7-78f585378448-kube-api-access-hrsk8\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196278 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6dd58d8-2dec-4e9a-b9b7-78f585378448-logs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196323 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196355 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-public-tls-certs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196382 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-config-data\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.196847 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6dd58d8-2dec-4e9a-b9b7-78f585378448-logs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.201783 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-public-tls-certs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.202777 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.203546 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-config-data\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.206134 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6dd58d8-2dec-4e9a-b9b7-78f585378448-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.226675 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrsk8\" (UniqueName: \"kubernetes.io/projected/d6dd58d8-2dec-4e9a-b9b7-78f585378448-kube-api-access-hrsk8\") pod \"nova-api-0\" (UID: \"d6dd58d8-2dec-4e9a-b9b7-78f585378448\") " pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.238729 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.541771 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"41eb2c3c-0030-4feb-a046-26b558595460","Type":"ContainerDied","Data":"63ba2da10a9687266413ad9108857eda38e2ad184bdc15d92790bb0c100255dc"} Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.541821 4995 scope.go:117] "RemoveContainer" containerID="28c557cb7e66ee02aab7ab9c869fcd4add944ca116d13d270a33a33b92c43c9f" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.541889 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.588919 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.604645 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.618762 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.620752 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.623297 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.628332 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.705924 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b776e369-c664-4e5e-a256-b5a1725c0142-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.706012 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b776e369-c664-4e5e-a256-b5a1725c0142-config-data\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.706038 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm9v6\" (UniqueName: \"kubernetes.io/projected/b776e369-c664-4e5e-a256-b5a1725c0142-kube-api-access-dm9v6\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.756596 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.808922 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b776e369-c664-4e5e-a256-b5a1725c0142-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.809237 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b776e369-c664-4e5e-a256-b5a1725c0142-config-data\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.809328 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm9v6\" (UniqueName: \"kubernetes.io/projected/b776e369-c664-4e5e-a256-b5a1725c0142-kube-api-access-dm9v6\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.814878 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b776e369-c664-4e5e-a256-b5a1725c0142-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.815696 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b776e369-c664-4e5e-a256-b5a1725c0142-config-data\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 
16:53:23.840667 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm9v6\" (UniqueName: \"kubernetes.io/projected/b776e369-c664-4e5e-a256-b5a1725c0142-kube-api-access-dm9v6\") pod \"nova-scheduler-0\" (UID: \"b776e369-c664-4e5e-a256-b5a1725c0142\") " pod="openstack/nova-scheduler-0" Jan 20 16:53:23 crc kubenswrapper[4995]: I0120 16:53:23.942815 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.005984 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b0570e1-6ac2-41ce-8905-e59a54e06a31" path="/var/lib/kubelet/pods/1b0570e1-6ac2-41ce-8905-e59a54e06a31/volumes" Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.007049 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41eb2c3c-0030-4feb-a046-26b558595460" path="/var/lib/kubelet/pods/41eb2c3c-0030-4feb-a046-26b558595460/volumes" Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.405851 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.557585 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6dd58d8-2dec-4e9a-b9b7-78f585378448","Type":"ContainerStarted","Data":"b1e0dda3eb5f31a65d3749dd2d550ccf329c357fea39b79c56ffd004585fa866"} Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.557629 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6dd58d8-2dec-4e9a-b9b7-78f585378448","Type":"ContainerStarted","Data":"55bd3bfc4b4fcd4827b3f878940e7704419a03613b93df38c5de18d5294f044a"} Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.557640 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6dd58d8-2dec-4e9a-b9b7-78f585378448","Type":"ContainerStarted","Data":"837a1eda7ac3241dca2bcaa94c4e1481687a6a7a7c0e38201a010ed0257aa77f"} Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.560315 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b776e369-c664-4e5e-a256-b5a1725c0142","Type":"ContainerStarted","Data":"a653940d5f3fba57c6c180181e3fed0b8fafd9e68c200cd0db1ec57dcd6296ee"} Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.578251 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.57823288 podStartE2EDuration="2.57823288s" podCreationTimestamp="2026-01-20 16:53:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:24.576056271 +0000 UTC m=+1322.820661077" watchObservedRunningTime="2026-01-20 16:53:24.57823288 +0000 UTC m=+1322.822837686" Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.883129 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.216:8775/\": read tcp 10.217.0.2:54444->10.217.0.216:8775: read: connection reset by peer" Jan 20 16:53:24 crc kubenswrapper[4995]: I0120 16:53:24.883195 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-metadata" probeResult="failure" output="Get 
\"https://10.217.0.216:8775/\": read tcp 10.217.0.2:54442->10.217.0.216:8775: read: connection reset by peer" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.308634 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.442633 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-combined-ca-bundle\") pod \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.442721 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-config-data\") pod \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.442751 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-nova-metadata-tls-certs\") pod \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.442781 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-logs\") pod \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.442813 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6n6s\" (UniqueName: \"kubernetes.io/projected/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-kube-api-access-m6n6s\") pod \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\" (UID: \"879d57fe-7d15-4279-b4b2-cd5f16db5ac9\") " Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.443408 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-logs" (OuterVolumeSpecName: "logs") pod "879d57fe-7d15-4279-b4b2-cd5f16db5ac9" (UID: "879d57fe-7d15-4279-b4b2-cd5f16db5ac9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.443996 4995 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-logs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.460153 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-kube-api-access-m6n6s" (OuterVolumeSpecName: "kube-api-access-m6n6s") pod "879d57fe-7d15-4279-b4b2-cd5f16db5ac9" (UID: "879d57fe-7d15-4279-b4b2-cd5f16db5ac9"). InnerVolumeSpecName "kube-api-access-m6n6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.472833 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "879d57fe-7d15-4279-b4b2-cd5f16db5ac9" (UID: "879d57fe-7d15-4279-b4b2-cd5f16db5ac9"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.478381 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-config-data" (OuterVolumeSpecName: "config-data") pod "879d57fe-7d15-4279-b4b2-cd5f16db5ac9" (UID: "879d57fe-7d15-4279-b4b2-cd5f16db5ac9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.502623 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "879d57fe-7d15-4279-b4b2-cd5f16db5ac9" (UID: "879d57fe-7d15-4279-b4b2-cd5f16db5ac9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.545825 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.545865 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.545877 4995 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.545891 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6n6s\" (UniqueName: \"kubernetes.io/projected/879d57fe-7d15-4279-b4b2-cd5f16db5ac9-kube-api-access-m6n6s\") on node \"crc\" DevicePath \"\"" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.571140 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b776e369-c664-4e5e-a256-b5a1725c0142","Type":"ContainerStarted","Data":"4ccadea17f3e13ac38b2a11925d10484dccd4832543f83b770aef304e93b5d90"} Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.573364 4995 generic.go:334] "Generic (PLEG): container finished" podID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerID="c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914" exitCode=0 Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.573817 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.573917 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"879d57fe-7d15-4279-b4b2-cd5f16db5ac9","Type":"ContainerDied","Data":"c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914"} Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.573961 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"879d57fe-7d15-4279-b4b2-cd5f16db5ac9","Type":"ContainerDied","Data":"91e278fc9b4e994cab02999ea2f9d709d466691e031be40fa0223cf190c07937"} Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.573996 4995 scope.go:117] "RemoveContainer" containerID="c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.591392 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.5913715760000002 podStartE2EDuration="2.591371576s" podCreationTimestamp="2026-01-20 16:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:25.584216623 +0000 UTC m=+1323.828821429" watchObservedRunningTime="2026-01-20 16:53:25.591371576 +0000 UTC m=+1323.835976382" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.599812 4995 scope.go:117] "RemoveContainer" containerID="2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.629178 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.633738 4995 scope.go:117] "RemoveContainer" containerID="c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914" Jan 20 16:53:25 crc kubenswrapper[4995]: E0120 16:53:25.636830 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914\": container with ID starting with c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914 not found: ID does not exist" containerID="c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.636877 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914"} err="failed to get container status \"c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914\": rpc error: code = NotFound desc = could not find container \"c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914\": container with ID starting with c4dff935b6ed5ec97f3c1f48260723e6c2473d099b25f8c2f6ebc23ca2029914 not found: ID does not exist" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.636904 4995 scope.go:117] "RemoveContainer" containerID="2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0" Jan 20 16:53:25 crc kubenswrapper[4995]: E0120 16:53:25.637355 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0\": container with ID starting with 2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0 not found: ID does not exist" 
containerID="2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.637443 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0"} err="failed to get container status \"2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0\": rpc error: code = NotFound desc = could not find container \"2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0\": container with ID starting with 2eecd4cf6e8e3037b95508a5d69072858c011c2d5bea97a456b4f8731d2203c0 not found: ID does not exist" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.642409 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.651709 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:53:25 crc kubenswrapper[4995]: E0120 16:53:25.652255 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-log" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.652280 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-log" Jan 20 16:53:25 crc kubenswrapper[4995]: E0120 16:53:25.652298 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-metadata" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.652304 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-metadata" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.652558 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-metadata" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.652594 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" containerName="nova-metadata-log" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.653765 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.656889 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.657044 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.683182 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.749326 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.749469 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.749508 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg27k\" (UniqueName: \"kubernetes.io/projected/4a499d87-fe94-4606-85e0-a225b12773f7-kube-api-access-xg27k\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.749545 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-config-data\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.749655 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a499d87-fe94-4606-85e0-a225b12773f7-logs\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.852548 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.852611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg27k\" (UniqueName: \"kubernetes.io/projected/4a499d87-fe94-4606-85e0-a225b12773f7-kube-api-access-xg27k\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.852666 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-config-data\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " 
pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.852748 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a499d87-fe94-4606-85e0-a225b12773f7-logs\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.853262 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.853547 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4a499d87-fe94-4606-85e0-a225b12773f7-logs\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.856860 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.857653 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.859028 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a499d87-fe94-4606-85e0-a225b12773f7-config-data\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.877323 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg27k\" (UniqueName: \"kubernetes.io/projected/4a499d87-fe94-4606-85e0-a225b12773f7-kube-api-access-xg27k\") pod \"nova-metadata-0\" (UID: \"4a499d87-fe94-4606-85e0-a225b12773f7\") " pod="openstack/nova-metadata-0" Jan 20 16:53:25 crc kubenswrapper[4995]: I0120 16:53:25.972047 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 16:53:26 crc kubenswrapper[4995]: I0120 16:53:26.002879 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="879d57fe-7d15-4279-b4b2-cd5f16db5ac9" path="/var/lib/kubelet/pods/879d57fe-7d15-4279-b4b2-cd5f16db5ac9/volumes" Jan 20 16:53:26 crc kubenswrapper[4995]: I0120 16:53:26.477183 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 16:53:26 crc kubenswrapper[4995]: I0120 16:53:26.607328 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4a499d87-fe94-4606-85e0-a225b12773f7","Type":"ContainerStarted","Data":"58eb2bc2fd304c7b3b387bc843f4b9a37b52a66278d974f1f466986506ca3f31"} Jan 20 16:53:27 crc kubenswrapper[4995]: I0120 16:53:27.633312 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4a499d87-fe94-4606-85e0-a225b12773f7","Type":"ContainerStarted","Data":"c1ea1c83d6546d474f25f7e65301eaf55816e1aa9deacbfb4c81c0f2be871ced"} Jan 20 16:53:27 crc kubenswrapper[4995]: I0120 16:53:27.633879 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4a499d87-fe94-4606-85e0-a225b12773f7","Type":"ContainerStarted","Data":"43e720cb5c9e039d3f94092a32698189e088e9ff4d08e773ef122d7bb01e77fb"} Jan 20 16:53:27 crc kubenswrapper[4995]: I0120 16:53:27.665400 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.665375124 podStartE2EDuration="2.665375124s" podCreationTimestamp="2026-01-20 16:53:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:53:27.650308336 +0000 UTC m=+1325.894913172" watchObservedRunningTime="2026-01-20 16:53:27.665375124 +0000 UTC m=+1325.909979940" Jan 20 16:53:28 crc kubenswrapper[4995]: I0120 16:53:28.943811 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 20 16:53:30 crc kubenswrapper[4995]: I0120 16:53:30.572134 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:53:30 crc kubenswrapper[4995]: I0120 16:53:30.572379 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:53:30 crc kubenswrapper[4995]: I0120 16:53:30.972694 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 20 16:53:30 crc kubenswrapper[4995]: I0120 16:53:30.972823 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 20 16:53:33 crc kubenswrapper[4995]: I0120 16:53:33.239283 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 16:53:33 crc kubenswrapper[4995]: I0120 16:53:33.239644 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 16:53:33 crc kubenswrapper[4995]: I0120 
16:53:33.943112 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 20 16:53:33 crc kubenswrapper[4995]: I0120 16:53:33.976397 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 20 16:53:34 crc kubenswrapper[4995]: I0120 16:53:34.257256 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d6dd58d8-2dec-4e9a-b9b7-78f585378448" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.226:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 16:53:34 crc kubenswrapper[4995]: I0120 16:53:34.257268 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d6dd58d8-2dec-4e9a-b9b7-78f585378448" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.226:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 16:53:34 crc kubenswrapper[4995]: I0120 16:53:34.731559 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 20 16:53:35 crc kubenswrapper[4995]: I0120 16:53:35.973111 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 20 16:53:35 crc kubenswrapper[4995]: I0120 16:53:35.974392 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 20 16:53:36 crc kubenswrapper[4995]: I0120 16:53:36.987238 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4a499d87-fe94-4606-85e0-a225b12773f7" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.228:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 16:53:36 crc kubenswrapper[4995]: I0120 16:53:36.987238 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4a499d87-fe94-4606-85e0-a225b12773f7" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.228:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 16:53:40 crc kubenswrapper[4995]: I0120 16:53:40.713162 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 20 16:53:43 crc kubenswrapper[4995]: I0120 16:53:43.247172 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 16:53:43 crc kubenswrapper[4995]: I0120 16:53:43.248345 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 16:53:43 crc kubenswrapper[4995]: I0120 16:53:43.248723 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 16:53:43 crc kubenswrapper[4995]: I0120 16:53:43.248789 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 16:53:43 crc kubenswrapper[4995]: I0120 16:53:43.254743 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 16:53:43 crc kubenswrapper[4995]: I0120 16:53:43.257020 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 16:53:45 crc kubenswrapper[4995]: I0120 16:53:45.981402 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/nova-metadata-0" Jan 20 16:53:45 crc kubenswrapper[4995]: I0120 16:53:45.982136 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 16:53:46 crc kubenswrapper[4995]: I0120 16:53:46.008794 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 16:53:46 crc kubenswrapper[4995]: I0120 16:53:46.839617 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 16:53:56 crc kubenswrapper[4995]: I0120 16:53:56.275478 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 20 16:53:58 crc kubenswrapper[4995]: I0120 16:53:58.082132 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.571709 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.572027 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.572070 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.572797 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4ec06a3af3d63376517d75d9eacbb252d52f03f8933ff215b7181152846db60"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.572849 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://e4ec06a3af3d63376517d75d9eacbb252d52f03f8933ff215b7181152846db60" gracePeriod=600 Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.821516 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="rabbitmq" containerID="cri-o://e3deb01ca2be4cb8c084ba433fa4731ea41a459683255dc3063bea01ec264540" gracePeriod=604796 Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.976003 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="e4ec06a3af3d63376517d75d9eacbb252d52f03f8933ff215b7181152846db60" exitCode=0 Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.976094 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"e4ec06a3af3d63376517d75d9eacbb252d52f03f8933ff215b7181152846db60"} Jan 20 16:54:00 crc kubenswrapper[4995]: I0120 16:54:00.976409 4995 scope.go:117] "RemoveContainer" containerID="8ecef0f787bcc6c0229321b3bf04fd7a400236ca19aefa00a3e8afeb5931315b" Jan 20 16:54:01 crc kubenswrapper[4995]: I0120 16:54:01.987533 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"} Jan 20 16:54:02 crc kubenswrapper[4995]: I0120 16:54:02.270935 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="rabbitmq" containerID="cri-o://a2b32144a30f90a91a08d21858dce811acfa706d9048a95a9b86d7591102fea1" gracePeriod=604796 Jan 20 16:54:03 crc kubenswrapper[4995]: I0120 16:54:03.219830 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Jan 20 16:54:03 crc kubenswrapper[4995]: I0120 16:54:03.508939 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.088466 4995 generic.go:334] "Generic (PLEG): container finished" podID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerID="e3deb01ca2be4cb8c084ba433fa4731ea41a459683255dc3063bea01ec264540" exitCode=0 Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.088897 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4055d0be-e174-4fb9-9026-1a0499fe9dc6","Type":"ContainerDied","Data":"e3deb01ca2be4cb8c084ba433fa4731ea41a459683255dc3063bea01ec264540"} Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.500654 4995 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.500654 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681069 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-server-conf\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681158 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sclxt\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-kube-api-access-sclxt\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681187 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681220 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4055d0be-e174-4fb9-9026-1a0499fe9dc6-erlang-cookie-secret\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681243 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-confd\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681267 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4055d0be-e174-4fb9-9026-1a0499fe9dc6-pod-info\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681310 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-plugins-conf\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681405 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-plugins\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681464 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-erlang-cookie\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") "
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681495 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-config-data\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: 
\"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.681528 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-tls\") pod \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\" (UID: \"4055d0be-e174-4fb9-9026-1a0499fe9dc6\") " Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.682417 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.682447 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.683211 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.688336 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.688440 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-kube-api-access-sclxt" (OuterVolumeSpecName: "kube-api-access-sclxt") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "kube-api-access-sclxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.697865 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4055d0be-e174-4fb9-9026-1a0499fe9dc6-pod-info" (OuterVolumeSpecName: "pod-info") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.698984 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4055d0be-e174-4fb9-9026-1a0499fe9dc6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.703402 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.722700 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-config-data" (OuterVolumeSpecName: "config-data") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.774298 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-server-conf" (OuterVolumeSpecName: "server-conf") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784040 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784070 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784094 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784103 4995 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-server-conf\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784112 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sclxt\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-kube-api-access-sclxt\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784133 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784141 4995 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4055d0be-e174-4fb9-9026-1a0499fe9dc6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784281 4995 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4055d0be-e174-4fb9-9026-1a0499fe9dc6-pod-info\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 
16:54:07.784330 4995 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4055d0be-e174-4fb9-9026-1a0499fe9dc6-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.784351 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.799766 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4055d0be-e174-4fb9-9026-1a0499fe9dc6" (UID: "4055d0be-e174-4fb9-9026-1a0499fe9dc6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.809509 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.886299 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:07 crc kubenswrapper[4995]: I0120 16:54:07.886348 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4055d0be-e174-4fb9-9026-1a0499fe9dc6-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.101051 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4055d0be-e174-4fb9-9026-1a0499fe9dc6","Type":"ContainerDied","Data":"040d368f0bb56ae1400105700fdcdbf596c8829ca86ebbb8d7f8a2b672a29b53"}
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.101135 4995 scope.go:117] "RemoveContainer" containerID="e3deb01ca2be4cb8c084ba433fa4731ea41a459683255dc3063bea01ec264540"
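
The teardown above was initiated by the API-side "SyncLoop DELETE", and the rabbitmq containers were killed with their pod's termination grace period still almost intact (gracePeriod=604796 at kill time, i.e. roughly seven days). A sketch of issuing such a deletion with client-go; the kubeconfig path and the exact grace value here are illustrative assumptions, not taken from the log:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a reachable cluster via the default kubeconfig location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// A very large grace period, as the rabbitmq operator appears to set
	// on these pods; the kubelet logs "Killing container with a grace
	// period" with the seconds remaining when the kill is issued.
	grace := int64(604800)
	if err := cs.CoreV1().Pods("openstack").Delete(
		context.Background(),
		"rabbitmq-server-0",
		metav1.DeleteOptions{GracePeriodSeconds: &grace},
	); err != nil {
		panic(err)
	}
}

Only after the container exits (here cleanly, exitCode=0, well before the deadline) does the reconciler unmount each volume and detach the local-storage device, as the entries above show.
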
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.101302 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.129143 4995 scope.go:117] "RemoveContainer" containerID="e43764df4b82f7c03cd027fee5d4c77391d8c5774ce51d750a84dc6225286250"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.134112 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.150224 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.164280 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 16:54:08 crc kubenswrapper[4995]: E0120 16:54:08.174136 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="setup-container"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.174175 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="setup-container"
Jan 20 16:54:08 crc kubenswrapper[4995]: E0120 16:54:08.174199 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="rabbitmq"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.174208 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="rabbitmq"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.174569 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" containerName="rabbitmq"
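
Note the DELETE, REMOVE, ADD sequence for the same pod name: the StatefulSet controller replaces the pod under an identical name, so the kubelet first purges per-container resource state (cpu_manager, memory_manager) keyed on the old UID 4055d0be-... and then admits a new pod object with a fresh UID. A sketch of observing that replacement from outside with a client-go watch; the clientset setup and field selector are assumptions for illustration:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Watch a single pod name; a StatefulSet replacement shows up as
	// DELETED (old UID 4055d0be-...) followed by ADDED (new UID cfa14e5d-...).
	w, err := cs.CoreV1().Pods("openstack").Watch(context.Background(), metav1.ListOptions{
		FieldSelector: "metadata.name=rabbitmq-server-0",
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*corev1.Pod); ok {
			fmt.Printf("%s %s uid=%s\n", ev.Type, pod.Name, pod.UID)
		}
	}
}
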
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.175864 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.179455 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.179670 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.179793 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.181845 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.185224 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-nsxr9"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.185466 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.191312 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.195353 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.293313 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.293631 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.293781 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.293920 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294007 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-config-data\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0"
Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294120 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5pxd\" (UniqueName: 
\"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-kube-api-access-b5pxd\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294259 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294366 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294462 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294550 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfa14e5d-418a-4eed-96fe-fef4b2a88543-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.294653 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfa14e5d-418a-4eed-96fe-fef4b2a88543-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.396302 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.396741 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.396747 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.397226 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-config-data\") pod \"rabbitmq-server-0\" (UID: 
\"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.397440 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5pxd\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-kube-api-access-b5pxd\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.397906 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-config-data\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.398435 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.398808 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.398975 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.399407 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfa14e5d-418a-4eed-96fe-fef4b2a88543-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.399666 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfa14e5d-418a-4eed-96fe-fef4b2a88543-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.400962 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.401248 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.401511 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/cfa14e5d-418a-4eed-96fe-fef4b2a88543-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.404930 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.405495 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cfa14e5d-418a-4eed-96fe-fef4b2a88543-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.406364 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.409784 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.406869 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cfa14e5d-418a-4eed-96fe-fef4b2a88543-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.410671 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.410470 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cfa14e5d-418a-4eed-96fe-fef4b2a88543-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.418359 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5pxd\" (UniqueName: \"kubernetes.io/projected/cfa14e5d-418a-4eed-96fe-fef4b2a88543-kube-api-access-b5pxd\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.462465 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"cfa14e5d-418a-4eed-96fe-fef4b2a88543\") " pod="openstack/rabbitmq-server-0" Jan 20 16:54:08 crc kubenswrapper[4995]: I0120 16:54:08.518264 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.045618 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.115739 4995 generic.go:334] "Generic (PLEG): container finished" podID="79c459b9-ccad-49a5-b945-64903e2c5308" containerID="a2b32144a30f90a91a08d21858dce811acfa706d9048a95a9b86d7591102fea1" exitCode=0 Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.115944 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"79c459b9-ccad-49a5-b945-64903e2c5308","Type":"ContainerDied","Data":"a2b32144a30f90a91a08d21858dce811acfa706d9048a95a9b86d7591102fea1"} Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.123386 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfa14e5d-418a-4eed-96fe-fef4b2a88543","Type":"ContainerStarted","Data":"84c63da2f38f7267dc66108c0057ce0e5ef420d54b77b4d70b5709407865ab7b"} Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.337709 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345373 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-server-conf\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345410 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-config-data\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345479 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/79c459b9-ccad-49a5-b945-64903e2c5308-pod-info\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345529 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-plugins-conf\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345565 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-erlang-cookie\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345599 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-tls\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345621 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-confd\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345648 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgpjp\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-kube-api-access-cgpjp\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345673 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/79c459b9-ccad-49a5-b945-64903e2c5308-erlang-cookie-secret\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345714 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.345834 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-plugins\") pod \"79c459b9-ccad-49a5-b945-64903e2c5308\" (UID: \"79c459b9-ccad-49a5-b945-64903e2c5308\") " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.346455 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.346688 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.350482 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.351419 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/79c459b9-ccad-49a5-b945-64903e2c5308-pod-info" (OuterVolumeSpecName: "pod-info") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.355264 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.357618 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c459b9-ccad-49a5-b945-64903e2c5308-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.358726 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-kube-api-access-cgpjp" (OuterVolumeSpecName: "kube-api-access-cgpjp") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "kube-api-access-cgpjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.366038 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.416308 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-config-data" (OuterVolumeSpecName: "config-data") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.418278 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-server-conf" (OuterVolumeSpecName: "server-conf") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.447899 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.447936 4995 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-server-conf\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.447946 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.447957 4995 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/79c459b9-ccad-49a5-b945-64903e2c5308-pod-info\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.447981 4995 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/79c459b9-ccad-49a5-b945-64903e2c5308-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.447993 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.448001 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.448009 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgpjp\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-kube-api-access-cgpjp\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.448019 4995 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/79c459b9-ccad-49a5-b945-64903e2c5308-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.448039 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.471890 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.528852 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "79c459b9-ccad-49a5-b945-64903e2c5308" (UID: "79c459b9-ccad-49a5-b945-64903e2c5308"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.549326 4995 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/79c459b9-ccad-49a5-b945-64903e2c5308-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:09 crc kubenswrapper[4995]: I0120 16:54:09.549361 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.001057 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4055d0be-e174-4fb9-9026-1a0499fe9dc6" path="/var/lib/kubelet/pods/4055d0be-e174-4fb9-9026-1a0499fe9dc6/volumes" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.132974 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"79c459b9-ccad-49a5-b945-64903e2c5308","Type":"ContainerDied","Data":"2abf40b6d8836fc2d50620d52df63f67070bd8552605ee01760a0ee284703e59"} Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.133027 4995 scope.go:117] "RemoveContainer" containerID="a2b32144a30f90a91a08d21858dce811acfa706d9048a95a9b86d7591102fea1" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.133190 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.159383 4995 scope.go:117] "RemoveContainer" containerID="f9924f8ceedb006a1a3a2d00d1ed358cfb77191ca6fd9c24966a0a177abff2a8" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.178458 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.204636 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.215536 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:54:10 crc kubenswrapper[4995]: E0120 16:54:10.219184 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="setup-container" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.219208 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="setup-container" Jan 20 16:54:10 crc kubenswrapper[4995]: E0120 16:54:10.219232 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="rabbitmq" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.219238 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="rabbitmq" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.219456 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" containerName="rabbitmq" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.223294 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.224981 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-cxrpf" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.225299 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.226055 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.233177 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.233381 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.233553 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.233637 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.240710 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.361954 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362017 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v7nz\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-kube-api-access-5v7nz\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362097 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362119 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362160 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362176 4995 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362206 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362246 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362267 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362283 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.362301 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464048 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464145 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v7nz\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-kube-api-access-5v7nz\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464178 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464204 4995 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464259 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464276 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464308 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464354 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464381 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464397 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464433 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.464485 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.465238 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.466361 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.466839 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.467275 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.467416 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.470183 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.471183 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.471440 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.472463 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.491733 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v7nz\" (UniqueName: \"kubernetes.io/projected/a10b59cc-41b2-49f9-ba12-2bdb82b568f7-kube-api-access-5v7nz\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.509792 4995 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a10b59cc-41b2-49f9-ba12-2bdb82b568f7\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:10 crc kubenswrapper[4995]: I0120 16:54:10.555155 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.008758 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 16:54:11 crc kubenswrapper[4995]: W0120 16:54:11.019363 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda10b59cc_41b2_49f9_ba12_2bdb82b568f7.slice/crio-746d59dcea404cb7cb8a0b1c8c7ed1a8be68d82669bd235e2ac3b2d77ba5ed52 WatchSource:0}: Error finding container 746d59dcea404cb7cb8a0b1c8c7ed1a8be68d82669bd235e2ac3b2d77ba5ed52: Status 404 returned error can't find the container with id 746d59dcea404cb7cb8a0b1c8c7ed1a8be68d82669bd235e2ac3b2d77ba5ed52 Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.147131 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfa14e5d-418a-4eed-96fe-fef4b2a88543","Type":"ContainerStarted","Data":"577ade6b383ff5ec7cdcfa03185022faea0b0ff1817b28f8b8f2b644c711a260"} Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.150789 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a10b59cc-41b2-49f9-ba12-2bdb82b568f7","Type":"ContainerStarted","Data":"746d59dcea404cb7cb8a0b1c8c7ed1a8be68d82669bd235e2ac3b2d77ba5ed52"} Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.681679 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-spslg"] Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.683695 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.685378 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.711895 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-spslg"] Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786591 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786665 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4kf2\" (UniqueName: \"kubernetes.io/projected/09692f09-ec00-4d98-93c3-f1fc0782619c-kube-api-access-j4kf2\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786699 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-config\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786723 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786746 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786762 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.786795 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.888396 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: 
\"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.889559 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.889688 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4kf2\" (UniqueName: \"kubernetes.io/projected/09692f09-ec00-4d98-93c3-f1fc0782619c-kube-api-access-j4kf2\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.889711 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-config\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.889766 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.890458 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.889798 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.890526 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.890854 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-config\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.890894 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 
16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.891129 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.891200 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.891358 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:11 crc kubenswrapper[4995]: I0120 16:54:11.926213 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4kf2\" (UniqueName: \"kubernetes.io/projected/09692f09-ec00-4d98-93c3-f1fc0782619c-kube-api-access-j4kf2\") pod \"dnsmasq-dns-79bd4cc8c9-spslg\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:12 crc kubenswrapper[4995]: I0120 16:54:12.001548 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:12 crc kubenswrapper[4995]: I0120 16:54:12.001976 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79c459b9-ccad-49a5-b945-64903e2c5308" path="/var/lib/kubelet/pods/79c459b9-ccad-49a5-b945-64903e2c5308/volumes" Jan 20 16:54:12 crc kubenswrapper[4995]: I0120 16:54:12.483163 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-spslg"] Jan 20 16:54:13 crc kubenswrapper[4995]: I0120 16:54:13.181444 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a10b59cc-41b2-49f9-ba12-2bdb82b568f7","Type":"ContainerStarted","Data":"d645cf4f8c2070f53c34ba3d3b098c7e95c2b1e085dd538df4e6260de55a5b0b"} Jan 20 16:54:13 crc kubenswrapper[4995]: I0120 16:54:13.183983 4995 generic.go:334] "Generic (PLEG): container finished" podID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerID="1e21e518e9ce8a0ec1bd2853283b10da09ac7a85804c3b89b126871386d2dd94" exitCode=0 Jan 20 16:54:13 crc kubenswrapper[4995]: I0120 16:54:13.184018 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" event={"ID":"09692f09-ec00-4d98-93c3-f1fc0782619c","Type":"ContainerDied","Data":"1e21e518e9ce8a0ec1bd2853283b10da09ac7a85804c3b89b126871386d2dd94"} Jan 20 16:54:13 crc kubenswrapper[4995]: I0120 16:54:13.184037 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" event={"ID":"09692f09-ec00-4d98-93c3-f1fc0782619c","Type":"ContainerStarted","Data":"889cc6b0cd9bd6f34fc4b98f38986825d81bf01da83c121e80bc2e63394d54c2"} Jan 20 16:54:14 crc kubenswrapper[4995]: I0120 16:54:14.193896 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" 
event={"ID":"09692f09-ec00-4d98-93c3-f1fc0782619c","Type":"ContainerStarted","Data":"c12559d1b944d74919ec01fdb1044c244e4cebd46029c153a72f82c73d03ad74"} Jan 20 16:54:14 crc kubenswrapper[4995]: I0120 16:54:14.217727 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" podStartSLOduration=3.217704627 podStartE2EDuration="3.217704627s" podCreationTimestamp="2026-01-20 16:54:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:54:14.211995403 +0000 UTC m=+1372.456600209" watchObservedRunningTime="2026-01-20 16:54:14.217704627 +0000 UTC m=+1372.462309433" Jan 20 16:54:15 crc kubenswrapper[4995]: I0120 16:54:15.202106 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.600479 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6tmnt"] Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.604514 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.613316 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6tmnt"] Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.744630 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-utilities\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.744867 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnhcz\" (UniqueName: \"kubernetes.io/projected/67d81188-389f-43f3-939a-4f9654900aac-kube-api-access-bnhcz\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.745051 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-catalog-content\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.847034 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-utilities\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.847185 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnhcz\" (UniqueName: \"kubernetes.io/projected/67d81188-389f-43f3-939a-4f9654900aac-kube-api-access-bnhcz\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.847307 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-catalog-content\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.847867 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-catalog-content\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.847883 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-utilities\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.872019 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnhcz\" (UniqueName: \"kubernetes.io/projected/67d81188-389f-43f3-939a-4f9654900aac-kube-api-access-bnhcz\") pod \"redhat-operators-6tmnt\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") " pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:17 crc kubenswrapper[4995]: I0120 16:54:17.927954 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:18 crc kubenswrapper[4995]: I0120 16:54:18.436105 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6tmnt"] Jan 20 16:54:19 crc kubenswrapper[4995]: I0120 16:54:19.249399 4995 generic.go:334] "Generic (PLEG): container finished" podID="67d81188-389f-43f3-939a-4f9654900aac" containerID="dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04" exitCode=0 Jan 20 16:54:19 crc kubenswrapper[4995]: I0120 16:54:19.249903 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerDied","Data":"dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04"} Jan 20 16:54:19 crc kubenswrapper[4995]: I0120 16:54:19.249932 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerStarted","Data":"72bb7795ec447db66a9694dc83373d51edc22d98a7bcb9124eca1bb481616fde"} Jan 20 16:54:20 crc kubenswrapper[4995]: I0120 16:54:20.262598 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerStarted","Data":"f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98"} Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.004147 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.094178 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-tv7m9"] Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.094495 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" 
podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerName="dnsmasq-dns" containerID="cri-o://ab3877b92388988d0b8ec4c03964c1280d4626ae669d98163d1720204a9e04c4" gracePeriod=10 Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.197644 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-b2hmj"] Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.201877 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.217961 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-b2hmj"] Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.254839 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxqqq\" (UniqueName: \"kubernetes.io/projected/4d44782e-c760-4297-8d8b-8e87526ffbdb-kube-api-access-zxqqq\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.254944 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.254992 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.255021 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.255120 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.255191 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.255221 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-config\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc 
kubenswrapper[4995]: I0120 16:54:22.356853 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.356945 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.356975 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-config\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.357033 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxqqq\" (UniqueName: \"kubernetes.io/projected/4d44782e-c760-4297-8d8b-8e87526ffbdb-kube-api-access-zxqqq\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.357117 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.357158 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.357183 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.357788 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-config\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.357849 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.358207 4995 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.358234 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.358399 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.358471 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4d44782e-c760-4297-8d8b-8e87526ffbdb-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.376479 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxqqq\" (UniqueName: \"kubernetes.io/projected/4d44782e-c760-4297-8d8b-8e87526ffbdb-kube-api-access-zxqqq\") pod \"dnsmasq-dns-6cd9bffc9-b2hmj\" (UID: \"4d44782e-c760-4297-8d8b-8e87526ffbdb\") " pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:22 crc kubenswrapper[4995]: I0120 16:54:22.581877 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:23 crc kubenswrapper[4995]: I0120 16:54:23.087195 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-b2hmj"] Jan 20 16:54:23 crc kubenswrapper[4995]: I0120 16:54:23.301308 4995 generic.go:334] "Generic (PLEG): container finished" podID="67d81188-389f-43f3-939a-4f9654900aac" containerID="f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98" exitCode=0 Jan 20 16:54:23 crc kubenswrapper[4995]: I0120 16:54:23.301373 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerDied","Data":"f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98"} Jan 20 16:54:23 crc kubenswrapper[4995]: I0120 16:54:23.302949 4995 generic.go:334] "Generic (PLEG): container finished" podID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerID="ab3877b92388988d0b8ec4c03964c1280d4626ae669d98163d1720204a9e04c4" exitCode=0 Jan 20 16:54:23 crc kubenswrapper[4995]: I0120 16:54:23.303020 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" event={"ID":"d205e258-5f50-4d69-a33f-37bce8c1d479","Type":"ContainerDied","Data":"ab3877b92388988d0b8ec4c03964c1280d4626ae669d98163d1720204a9e04c4"} Jan 20 16:54:23 crc kubenswrapper[4995]: I0120 16:54:23.304858 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" event={"ID":"4d44782e-c760-4297-8d8b-8e87526ffbdb","Type":"ContainerStarted","Data":"686220fce908860bdadecc04b503c1329716b737d0be0e8d8890d0dbbe0714f0"} Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.278966 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.300845 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-nb\") pod \"d205e258-5f50-4d69-a33f-37bce8c1d479\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.300945 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-config\") pod \"d205e258-5f50-4d69-a33f-37bce8c1d479\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.301115 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-sb\") pod \"d205e258-5f50-4d69-a33f-37bce8c1d479\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.301172 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rngs9\" (UniqueName: \"kubernetes.io/projected/d205e258-5f50-4d69-a33f-37bce8c1d479-kube-api-access-rngs9\") pod \"d205e258-5f50-4d69-a33f-37bce8c1d479\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.301233 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-swift-storage-0\") pod \"d205e258-5f50-4d69-a33f-37bce8c1d479\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.301398 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-svc\") pod \"d205e258-5f50-4d69-a33f-37bce8c1d479\" (UID: \"d205e258-5f50-4d69-a33f-37bce8c1d479\") " Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.322369 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d205e258-5f50-4d69-a33f-37bce8c1d479-kube-api-access-rngs9" (OuterVolumeSpecName: "kube-api-access-rngs9") pod "d205e258-5f50-4d69-a33f-37bce8c1d479" (UID: "d205e258-5f50-4d69-a33f-37bce8c1d479"). InnerVolumeSpecName "kube-api-access-rngs9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.336203 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerStarted","Data":"ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf"} Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.344861 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" event={"ID":"d205e258-5f50-4d69-a33f-37bce8c1d479","Type":"ContainerDied","Data":"b0e58c8867415f2a362db07310d090d6c42db4d673a4f2076ce9e3d330e3a27d"} Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.344947 4995 scope.go:117] "RemoveContainer" containerID="ab3877b92388988d0b8ec4c03964c1280d4626ae669d98163d1720204a9e04c4" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.345222 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-tv7m9" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.353980 4995 generic.go:334] "Generic (PLEG): container finished" podID="4d44782e-c760-4297-8d8b-8e87526ffbdb" containerID="70aca9c43efe54932183f05108286e961f707cda8c4a1140d6d2fd37ba82b53c" exitCode=0 Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.354032 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" event={"ID":"4d44782e-c760-4297-8d8b-8e87526ffbdb","Type":"ContainerDied","Data":"70aca9c43efe54932183f05108286e961f707cda8c4a1140d6d2fd37ba82b53c"} Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.364512 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6tmnt" podStartSLOduration=2.714213535 podStartE2EDuration="7.364494317s" podCreationTimestamp="2026-01-20 16:54:17 +0000 UTC" firstStartedPulling="2026-01-20 16:54:19.253476053 +0000 UTC m=+1377.498080859" lastFinishedPulling="2026-01-20 16:54:23.903756835 +0000 UTC m=+1382.148361641" observedRunningTime="2026-01-20 16:54:24.362227276 +0000 UTC m=+1382.606832082" watchObservedRunningTime="2026-01-20 16:54:24.364494317 +0000 UTC m=+1382.609099123" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.405697 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rngs9\" (UniqueName: \"kubernetes.io/projected/d205e258-5f50-4d69-a33f-37bce8c1d479-kube-api-access-rngs9\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.427756 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-config" (OuterVolumeSpecName: "config") pod "d205e258-5f50-4d69-a33f-37bce8c1d479" (UID: "d205e258-5f50-4d69-a33f-37bce8c1d479"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.451931 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d205e258-5f50-4d69-a33f-37bce8c1d479" (UID: "d205e258-5f50-4d69-a33f-37bce8c1d479"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.452884 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d205e258-5f50-4d69-a33f-37bce8c1d479" (UID: "d205e258-5f50-4d69-a33f-37bce8c1d479"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.453773 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d205e258-5f50-4d69-a33f-37bce8c1d479" (UID: "d205e258-5f50-4d69-a33f-37bce8c1d479"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.464231 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d205e258-5f50-4d69-a33f-37bce8c1d479" (UID: "d205e258-5f50-4d69-a33f-37bce8c1d479"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.506543 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.506831 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.506887 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.506898 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.506907 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d205e258-5f50-4d69-a33f-37bce8c1d479-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.568171 4995 scope.go:117] "RemoveContainer" containerID="4828abc5495fb3b236ef08dadd2aeb646e583eb524db99208cf9b6b06a763ca6" Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.685787 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-tv7m9"] Jan 20 16:54:24 crc kubenswrapper[4995]: I0120 16:54:24.706604 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-tv7m9"] Jan 20 16:54:25 crc kubenswrapper[4995]: I0120 16:54:25.364799 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" event={"ID":"4d44782e-c760-4297-8d8b-8e87526ffbdb","Type":"ContainerStarted","Data":"3e6210f6df1a34a10e7fb747ba14194c882d2db91787a12e87f23524e15d018d"} Jan 20 16:54:25 crc 
kubenswrapper[4995]: I0120 16:54:25.392973 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" podStartSLOduration=3.39295254 podStartE2EDuration="3.39295254s" podCreationTimestamp="2026-01-20 16:54:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:54:25.38890732 +0000 UTC m=+1383.633512126" watchObservedRunningTime="2026-01-20 16:54:25.39295254 +0000 UTC m=+1383.637557346" Jan 20 16:54:26 crc kubenswrapper[4995]: I0120 16:54:26.001611 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" path="/var/lib/kubelet/pods/d205e258-5f50-4d69-a33f-37bce8c1d479/volumes" Jan 20 16:54:26 crc kubenswrapper[4995]: I0120 16:54:26.377531 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:27 crc kubenswrapper[4995]: I0120 16:54:27.928236 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:27 crc kubenswrapper[4995]: I0120 16:54:27.928517 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:28 crc kubenswrapper[4995]: I0120 16:54:28.982902 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6tmnt" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="registry-server" probeResult="failure" output=< Jan 20 16:54:28 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 16:54:28 crc kubenswrapper[4995]: > Jan 20 16:54:32 crc kubenswrapper[4995]: I0120 16:54:32.584111 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd9bffc9-b2hmj" Jan 20 16:54:32 crc kubenswrapper[4995]: I0120 16:54:32.651700 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-spslg"] Jan 20 16:54:32 crc kubenswrapper[4995]: I0120 16:54:32.651957 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerName="dnsmasq-dns" containerID="cri-o://c12559d1b944d74919ec01fdb1044c244e4cebd46029c153a72f82c73d03ad74" gracePeriod=10 Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.461306 4995 generic.go:334] "Generic (PLEG): container finished" podID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerID="c12559d1b944d74919ec01fdb1044c244e4cebd46029c153a72f82c73d03ad74" exitCode=0 Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.461610 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" event={"ID":"09692f09-ec00-4d98-93c3-f1fc0782619c","Type":"ContainerDied","Data":"c12559d1b944d74919ec01fdb1044c244e4cebd46029c153a72f82c73d03ad74"} Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.624054 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.794863 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-config\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.794912 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4kf2\" (UniqueName: \"kubernetes.io/projected/09692f09-ec00-4d98-93c3-f1fc0782619c-kube-api-access-j4kf2\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.795032 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-swift-storage-0\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.795093 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-nb\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.795183 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-openstack-edpm-ipam\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.795304 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-svc\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.795369 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-sb\") pod \"09692f09-ec00-4d98-93c3-f1fc0782619c\" (UID: \"09692f09-ec00-4d98-93c3-f1fc0782619c\") " Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.806289 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09692f09-ec00-4d98-93c3-f1fc0782619c-kube-api-access-j4kf2" (OuterVolumeSpecName: "kube-api-access-j4kf2") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "kube-api-access-j4kf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.859532 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.861518 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.877263 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.879266 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-config" (OuterVolumeSpecName: "config") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.879908 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.889633 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "09692f09-ec00-4d98-93c3-f1fc0782619c" (UID: "09692f09-ec00-4d98-93c3-f1fc0782619c"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898051 4995 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898291 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898310 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898321 4995 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898331 4995 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898340 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09692f09-ec00-4d98-93c3-f1fc0782619c-config\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:33 crc kubenswrapper[4995]: I0120 16:54:33.898348 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4kf2\" (UniqueName: \"kubernetes.io/projected/09692f09-ec00-4d98-93c3-f1fc0782619c-kube-api-access-j4kf2\") on node \"crc\" DevicePath \"\"" Jan 20 16:54:34 crc kubenswrapper[4995]: I0120 16:54:34.484902 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" Jan 20 16:54:34 crc kubenswrapper[4995]: I0120 16:54:34.486246 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-spslg" event={"ID":"09692f09-ec00-4d98-93c3-f1fc0782619c","Type":"ContainerDied","Data":"889cc6b0cd9bd6f34fc4b98f38986825d81bf01da83c121e80bc2e63394d54c2"} Jan 20 16:54:34 crc kubenswrapper[4995]: I0120 16:54:34.486331 4995 scope.go:117] "RemoveContainer" containerID="c12559d1b944d74919ec01fdb1044c244e4cebd46029c153a72f82c73d03ad74" Jan 20 16:54:34 crc kubenswrapper[4995]: I0120 16:54:34.515483 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-spslg"] Jan 20 16:54:34 crc kubenswrapper[4995]: I0120 16:54:34.524694 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-spslg"] Jan 20 16:54:34 crc kubenswrapper[4995]: I0120 16:54:34.528326 4995 scope.go:117] "RemoveContainer" containerID="1e21e518e9ce8a0ec1bd2853283b10da09ac7a85804c3b89b126871386d2dd94" Jan 20 16:54:36 crc kubenswrapper[4995]: I0120 16:54:36.000793 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" path="/var/lib/kubelet/pods/09692f09-ec00-4d98-93c3-f1fc0782619c/volumes" Jan 20 16:54:38 crc kubenswrapper[4995]: I0120 16:54:38.010910 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:38 crc kubenswrapper[4995]: I0120 16:54:38.101289 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6tmnt" Jan 20 16:54:38 crc kubenswrapper[4995]: I0120 16:54:38.263410 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6tmnt"] Jan 20 16:54:39 crc kubenswrapper[4995]: I0120 16:54:39.545711 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6tmnt" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="registry-server" containerID="cri-o://ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf" gracePeriod=2 Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.036872 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6tmnt"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.141455 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnhcz\" (UniqueName: \"kubernetes.io/projected/67d81188-389f-43f3-939a-4f9654900aac-kube-api-access-bnhcz\") pod \"67d81188-389f-43f3-939a-4f9654900aac\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") "
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.141562 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-catalog-content\") pod \"67d81188-389f-43f3-939a-4f9654900aac\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") "
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.141631 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-utilities\") pod \"67d81188-389f-43f3-939a-4f9654900aac\" (UID: \"67d81188-389f-43f3-939a-4f9654900aac\") "
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.142423 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-utilities" (OuterVolumeSpecName: "utilities") pod "67d81188-389f-43f3-939a-4f9654900aac" (UID: "67d81188-389f-43f3-939a-4f9654900aac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.146025 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.149366 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67d81188-389f-43f3-939a-4f9654900aac-kube-api-access-bnhcz" (OuterVolumeSpecName: "kube-api-access-bnhcz") pod "67d81188-389f-43f3-939a-4f9654900aac" (UID: "67d81188-389f-43f3-939a-4f9654900aac"). InnerVolumeSpecName "kube-api-access-bnhcz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.248347 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnhcz\" (UniqueName: \"kubernetes.io/projected/67d81188-389f-43f3-939a-4f9654900aac-kube-api-access-bnhcz\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.266700 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67d81188-389f-43f3-939a-4f9654900aac" (UID: "67d81188-389f-43f3-939a-4f9654900aac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.350675 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67d81188-389f-43f3-939a-4f9654900aac-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.557773 4995 generic.go:334] "Generic (PLEG): container finished" podID="67d81188-389f-43f3-939a-4f9654900aac" containerID="ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf" exitCode=0
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.557821 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerDied","Data":"ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf"}
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.557849 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6tmnt" event={"ID":"67d81188-389f-43f3-939a-4f9654900aac","Type":"ContainerDied","Data":"72bb7795ec447db66a9694dc83373d51edc22d98a7bcb9124eca1bb481616fde"}
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.557865 4995 scope.go:117] "RemoveContainer" containerID="ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.558029 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6tmnt"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.600258 4995 scope.go:117] "RemoveContainer" containerID="f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.600947 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6tmnt"]
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.610191 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6tmnt"]
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.638715 4995 scope.go:117] "RemoveContainer" containerID="dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.675838 4995 scope.go:117] "RemoveContainer" containerID="ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf"
Jan 20 16:54:40 crc kubenswrapper[4995]: E0120 16:54:40.676694 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf\": container with ID starting with ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf not found: ID does not exist" containerID="ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.676838 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf"} err="failed to get container status \"ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf\": rpc error: code = NotFound desc = could not find container \"ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf\": container with ID starting with ac147af677bd0df78c2f17e1b631117270bddf45c1c0c712012e80d369fd4eaf not found: ID does not exist"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.676979 4995 scope.go:117] "RemoveContainer" containerID="f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98"
Jan 20 16:54:40 crc kubenswrapper[4995]: E0120 16:54:40.677605 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98\": container with ID starting with f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98 not found: ID does not exist" containerID="f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.677688 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98"} err="failed to get container status \"f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98\": rpc error: code = NotFound desc = could not find container \"f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98\": container with ID starting with f05c51816d013892ea6e9ea36bd206e049a276664f9951c0324dc0d6b4a17f98 not found: ID does not exist"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.677727 4995 scope.go:117] "RemoveContainer" containerID="dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04"
Jan 20 16:54:40 crc kubenswrapper[4995]: E0120 16:54:40.678314 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04\": container with ID starting with dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04 not found: ID does not exist" containerID="dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04"
Jan 20 16:54:40 crc kubenswrapper[4995]: I0120 16:54:40.678448 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04"} err="failed to get container status \"dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04\": rpc error: code = NotFound desc = could not find container \"dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04\": container with ID starting with dbbf61ab7edfcfe59361c8010eb239a22119962c2b8eb3199272e3484889ae04 not found: ID does not exist"
Jan 20 16:54:42 crc kubenswrapper[4995]: I0120 16:54:42.008373 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67d81188-389f-43f3-939a-4f9654900aac" path="/var/lib/kubelet/pods/67d81188-389f-43f3-939a-4f9654900aac/volumes"
Jan 20 16:54:43 crc kubenswrapper[4995]: I0120 16:54:43.602125 4995 generic.go:334] "Generic (PLEG): container finished" podID="cfa14e5d-418a-4eed-96fe-fef4b2a88543" containerID="577ade6b383ff5ec7cdcfa03185022faea0b0ff1817b28f8b8f2b644c711a260" exitCode=0
Jan 20 16:54:43 crc kubenswrapper[4995]: I0120 16:54:43.602193 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfa14e5d-418a-4eed-96fe-fef4b2a88543","Type":"ContainerDied","Data":"577ade6b383ff5ec7cdcfa03185022faea0b0ff1817b28f8b8f2b644c711a260"}
Jan 20 16:54:44 crc kubenswrapper[4995]: I0120 16:54:44.614303 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cfa14e5d-418a-4eed-96fe-fef4b2a88543","Type":"ContainerStarted","Data":"b73630124618811f15d0a477abd9f0057846db0b148f62b73b1f8ee7370a631f"}
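The RemoveContainer / "ContainerStatus from runtime service failed" pairs above are the kubelet confirming that containers it was asked to delete are already gone: a gRPC NotFound from the CRI runtime is logged and then treated as success, which keeps cleanup idempotent when the same deletion is retried. A minimal Go sketch of that pattern, assuming a self-defined CRI-like client interface (the names below are illustrative, not the kubelet's actual API):

    package main

    import (
    	"context"
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    // runtimeService is a stand-in for the CRI runtime client; only the
    // single call this sketch needs is modeled.
    type runtimeService interface {
    	RemoveContainer(ctx context.Context, containerID string) error
    }

    // removeIfPresent deletes a container, treating gRPC NotFound as
    // success so that retried cleanups (as in the log above) are no-ops.
    func removeIfPresent(ctx context.Context, rt runtimeService, id string) error {
    	err := rt.RemoveContainer(ctx, id)
    	if status.Code(err) == codes.NotFound {
    		// Already gone: note it and move on, mirroring the
    		// "DeleteContainer returned error ... NotFound" entries.
    		fmt.Printf("container %s already removed\n", id)
    		return nil
    	}
    	return err // nil on success, real errors propagate
    }

    // fakeRT always reports NotFound, like the runtime did above.
    type fakeRT struct{}

    func (fakeRT) RemoveContainer(ctx context.Context, id string) error {
    	return status.Error(codes.NotFound, "could not find container "+id)
    }

    func main() {
    	_ = removeIfPresent(context.Background(), fakeRT{}, "ac147af677bd0df7")
    }

The errors above are therefore expected noise during pod removal rather than a failure of the teardown itself.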
event={"ID":"cfa14e5d-418a-4eed-96fe-fef4b2a88543","Type":"ContainerStarted","Data":"b73630124618811f15d0a477abd9f0057846db0b148f62b73b1f8ee7370a631f"} Jan 20 16:54:44 crc kubenswrapper[4995]: I0120 16:54:44.614858 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 20 16:54:44 crc kubenswrapper[4995]: I0120 16:54:44.643883 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.643862053 podStartE2EDuration="36.643862053s" podCreationTimestamp="2026-01-20 16:54:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:54:44.637365977 +0000 UTC m=+1402.881970793" watchObservedRunningTime="2026-01-20 16:54:44.643862053 +0000 UTC m=+1402.888466869" Jan 20 16:54:45 crc kubenswrapper[4995]: I0120 16:54:45.624459 4995 generic.go:334] "Generic (PLEG): container finished" podID="a10b59cc-41b2-49f9-ba12-2bdb82b568f7" containerID="d645cf4f8c2070f53c34ba3d3b098c7e95c2b1e085dd538df4e6260de55a5b0b" exitCode=0 Jan 20 16:54:45 crc kubenswrapper[4995]: I0120 16:54:45.624552 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a10b59cc-41b2-49f9-ba12-2bdb82b568f7","Type":"ContainerDied","Data":"d645cf4f8c2070f53c34ba3d3b098c7e95c2b1e085dd538df4e6260de55a5b0b"} Jan 20 16:54:46 crc kubenswrapper[4995]: I0120 16:54:46.633942 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a10b59cc-41b2-49f9-ba12-2bdb82b568f7","Type":"ContainerStarted","Data":"a08d99611b089cda7f3f6ed383a74bdeb139a1ba15d41ca6080913ddda554254"} Jan 20 16:54:46 crc kubenswrapper[4995]: I0120 16:54:46.634413 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 20 16:54:46 crc kubenswrapper[4995]: I0120 16:54:46.656118 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.656069726 podStartE2EDuration="36.656069726s" podCreationTimestamp="2026-01-20 16:54:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 16:54:46.655070768 +0000 UTC m=+1404.899675584" watchObservedRunningTime="2026-01-20 16:54:46.656069726 +0000 UTC m=+1404.900674542" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.162585 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"] Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.164780 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="registry-server" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.164876 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="registry-server" Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.164968 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerName="dnsmasq-dns" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.165048 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerName="dnsmasq-dns" Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.165153 4995 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerName="dnsmasq-dns" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.165230 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerName="dnsmasq-dns" Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.165312 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="extract-utilities" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.165381 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="extract-utilities" Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.165465 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerName="init" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.165550 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerName="init" Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.165627 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerName="init" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.165702 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerName="init" Jan 20 16:54:51 crc kubenswrapper[4995]: E0120 16:54:51.165805 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="extract-content" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.165878 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="extract-content" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.166247 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d205e258-5f50-4d69-a33f-37bce8c1d479" containerName="dnsmasq-dns" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.166341 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="67d81188-389f-43f3-939a-4f9654900aac" containerName="registry-server" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.166438 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="09692f09-ec00-4d98-93c3-f1fc0782619c" containerName="dnsmasq-dns" Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.167439 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.170384 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.170411 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.170695 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.170777 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.177038 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"]
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.286012 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.286146 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.286190 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.286291 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9t5b\" (UniqueName: \"kubernetes.io/projected/0b6494de-6466-4ecf-99d4-e410e3829130-kube-api-access-q9t5b\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.388285 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.388376 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.388409 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.388472 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9t5b\" (UniqueName: \"kubernetes.io/projected/0b6494de-6466-4ecf-99d4-e410e3829130-kube-api-access-q9t5b\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.394936 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.395576 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.401460 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.425709 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9t5b\" (UniqueName: \"kubernetes.io/projected/0b6494de-6466-4ecf-99d4-e410e3829130-kube-api-access-q9t5b\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:51 crc kubenswrapper[4995]: I0120 16:54:51.488391 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:54:52 crc kubenswrapper[4995]: W0120 16:54:52.003011 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b6494de_6466_4ecf_99d4_e410e3829130.slice/crio-38fe2be57f5b877f1c9fc31bc1fb2ff403a19a307499fe0a2219ed638a82cef7 WatchSource:0}: Error finding container 38fe2be57f5b877f1c9fc31bc1fb2ff403a19a307499fe0a2219ed638a82cef7: Status 404 returned error can't find the container with id 38fe2be57f5b877f1c9fc31bc1fb2ff403a19a307499fe0a2219ed638a82cef7
Jan 20 16:54:52 crc kubenswrapper[4995]: I0120 16:54:52.021113 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"]
Jan 20 16:54:52 crc kubenswrapper[4995]: I0120 16:54:52.680931 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz" event={"ID":"0b6494de-6466-4ecf-99d4-e410e3829130","Type":"ContainerStarted","Data":"38fe2be57f5b877f1c9fc31bc1fb2ff403a19a307499fe0a2219ed638a82cef7"}
Jan 20 16:54:58 crc kubenswrapper[4995]: I0120 16:54:58.521749 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Jan 20 16:55:00 crc kubenswrapper[4995]: I0120 16:55:00.558322 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 16:55:04 crc kubenswrapper[4995]: I0120 16:55:04.087287 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz" event={"ID":"0b6494de-6466-4ecf-99d4-e410e3829130","Type":"ContainerStarted","Data":"56174aa49731133fde5252742c186049e03edbc96db0d36d64719deafbfa14dc"}
Jan 20 16:55:04 crc kubenswrapper[4995]: I0120 16:55:04.113717 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz" podStartSLOduration=2.41264791 podStartE2EDuration="13.113689225s" podCreationTimestamp="2026-01-20 16:54:51 +0000 UTC" firstStartedPulling="2026-01-20 16:54:52.006730062 +0000 UTC m=+1410.251334878" lastFinishedPulling="2026-01-20 16:55:02.707771387 +0000 UTC m=+1420.952376193" observedRunningTime="2026-01-20 16:55:04.109178092 +0000 UTC m=+1422.353782908" watchObservedRunningTime="2026-01-20 16:55:04.113689225 +0000 UTC m=+1422.358294031"
Jan 20 16:55:15 crc kubenswrapper[4995]: I0120 16:55:15.200879 4995 generic.go:334] "Generic (PLEG): container finished" podID="0b6494de-6466-4ecf-99d4-e410e3829130" containerID="56174aa49731133fde5252742c186049e03edbc96db0d36d64719deafbfa14dc" exitCode=0
Jan 20 16:55:15 crc kubenswrapper[4995]: I0120 16:55:15.200965 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz" event={"ID":"0b6494de-6466-4ecf-99d4-e410e3829130","Type":"ContainerDied","Data":"56174aa49731133fde5252742c186049e03edbc96db0d36d64719deafbfa14dc"}
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.695828 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.734950 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9t5b\" (UniqueName: \"kubernetes.io/projected/0b6494de-6466-4ecf-99d4-e410e3829130-kube-api-access-q9t5b\") pod \"0b6494de-6466-4ecf-99d4-e410e3829130\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") "
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.735057 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-repo-setup-combined-ca-bundle\") pod \"0b6494de-6466-4ecf-99d4-e410e3829130\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") "
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.735205 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-inventory\") pod \"0b6494de-6466-4ecf-99d4-e410e3829130\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") "
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.735286 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-ssh-key-openstack-edpm-ipam\") pod \"0b6494de-6466-4ecf-99d4-e410e3829130\" (UID: \"0b6494de-6466-4ecf-99d4-e410e3829130\") "
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.741699 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b6494de-6466-4ecf-99d4-e410e3829130-kube-api-access-q9t5b" (OuterVolumeSpecName: "kube-api-access-q9t5b") pod "0b6494de-6466-4ecf-99d4-e410e3829130" (UID: "0b6494de-6466-4ecf-99d4-e410e3829130"). InnerVolumeSpecName "kube-api-access-q9t5b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.742701 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "0b6494de-6466-4ecf-99d4-e410e3829130" (UID: "0b6494de-6466-4ecf-99d4-e410e3829130"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.766579 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0b6494de-6466-4ecf-99d4-e410e3829130" (UID: "0b6494de-6466-4ecf-99d4-e410e3829130"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.769601 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-inventory" (OuterVolumeSpecName: "inventory") pod "0b6494de-6466-4ecf-99d4-e410e3829130" (UID: "0b6494de-6466-4ecf-99d4-e410e3829130"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.837596 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9t5b\" (UniqueName: \"kubernetes.io/projected/0b6494de-6466-4ecf-99d4-e410e3829130-kube-api-access-q9t5b\") on node \"crc\" DevicePath \"\""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.837634 4995 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.837647 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-inventory\") on node \"crc\" DevicePath \"\""
Jan 20 16:55:16 crc kubenswrapper[4995]: I0120 16:55:16.837655 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0b6494de-6466-4ecf-99d4-e410e3829130-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.221264 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz" event={"ID":"0b6494de-6466-4ecf-99d4-e410e3829130","Type":"ContainerDied","Data":"38fe2be57f5b877f1c9fc31bc1fb2ff403a19a307499fe0a2219ed638a82cef7"}
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.221306 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38fe2be57f5b877f1c9fc31bc1fb2ff403a19a307499fe0a2219ed638a82cef7"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.221381 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.320116 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"]
Jan 20 16:55:17 crc kubenswrapper[4995]: E0120 16:55:17.320883 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b6494de-6466-4ecf-99d4-e410e3829130" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.322216 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b6494de-6466-4ecf-99d4-e410e3829130" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.322839 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b6494de-6466-4ecf-99d4-e410e3829130" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.323784 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.327431 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.330363 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.332101 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.335581 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.335725 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"]
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.346744 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.346896 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7ks7\" (UniqueName: \"kubernetes.io/projected/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-kube-api-access-s7ks7\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.346964 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.448631 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.449057 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.449296 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7ks7\" (UniqueName: \"kubernetes.io/projected/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-kube-api-access-s7ks7\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.452300 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.454625 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.468034 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7ks7\" (UniqueName: \"kubernetes.io/projected/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-kube-api-access-s7ks7\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-tf4fv\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:17 crc kubenswrapper[4995]: I0120 16:55:17.640819 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:18 crc kubenswrapper[4995]: I0120 16:55:18.182941 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"]
Jan 20 16:55:18 crc kubenswrapper[4995]: W0120 16:55:18.189094 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e4edc52_6ba0_441c_abeb_a7f17b0cb31f.slice/crio-99b61fde453ce5459569bd3ba9260232b3d5c6beeef3d441bf3b20af33455f07 WatchSource:0}: Error finding container 99b61fde453ce5459569bd3ba9260232b3d5c6beeef3d441bf3b20af33455f07: Status 404 returned error can't find the container with id 99b61fde453ce5459569bd3ba9260232b3d5c6beeef3d441bf3b20af33455f07
Jan 20 16:55:18 crc kubenswrapper[4995]: I0120 16:55:18.233184 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv" event={"ID":"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f","Type":"ContainerStarted","Data":"99b61fde453ce5459569bd3ba9260232b3d5c6beeef3d441bf3b20af33455f07"}
Jan 20 16:55:19 crc kubenswrapper[4995]: I0120 16:55:19.243487 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv" event={"ID":"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f","Type":"ContainerStarted","Data":"654453fc2a6d8c2fbc7cdfef38616a45935857864de1b5cae09d9b59237bd564"}
Jan 20 16:55:19 crc kubenswrapper[4995]: I0120 16:55:19.263950 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv" podStartSLOduration=1.746835431 podStartE2EDuration="2.263936131s" podCreationTimestamp="2026-01-20 16:55:17 +0000 UTC" firstStartedPulling="2026-01-20 16:55:18.192954587 +0000 UTC m=+1436.437559393" lastFinishedPulling="2026-01-20 16:55:18.710055277 +0000 UTC m=+1436.954660093" observedRunningTime="2026-01-20 16:55:19.25613226 +0000 UTC m=+1437.500737066" watchObservedRunningTime="2026-01-20 16:55:19.263936131 +0000 UTC m=+1437.508540927"
Jan 20 16:55:22 crc kubenswrapper[4995]: I0120 16:55:22.274500 4995 generic.go:334] "Generic (PLEG): container finished" podID="7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" containerID="654453fc2a6d8c2fbc7cdfef38616a45935857864de1b5cae09d9b59237bd564" exitCode=0
Jan 20 16:55:22 crc kubenswrapper[4995]: I0120 16:55:22.274576 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv" event={"ID":"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f","Type":"ContainerDied","Data":"654453fc2a6d8c2fbc7cdfef38616a45935857864de1b5cae09d9b59237bd564"}
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.719604 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv"
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.800787 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7ks7\" (UniqueName: \"kubernetes.io/projected/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-kube-api-access-s7ks7\") pod \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") "
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.800867 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-inventory\") pod \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") "
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.800966 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-ssh-key-openstack-edpm-ipam\") pod \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\" (UID: \"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f\") "
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.806881 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-kube-api-access-s7ks7" (OuterVolumeSpecName: "kube-api-access-s7ks7") pod "7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" (UID: "7e4edc52-6ba0-441c-abeb-a7f17b0cb31f"). InnerVolumeSpecName "kube-api-access-s7ks7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.831862 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" (UID: "7e4edc52-6ba0-441c-abeb-a7f17b0cb31f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.837046 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-inventory" (OuterVolumeSpecName: "inventory") pod "7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" (UID: "7e4edc52-6ba0-441c-abeb-a7f17b0cb31f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.903490 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7ks7\" (UniqueName: \"kubernetes.io/projected/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-kube-api-access-s7ks7\") on node \"crc\" DevicePath \"\"" Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.903528 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 16:55:23 crc kubenswrapper[4995]: I0120 16:55:23.903541 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7e4edc52-6ba0-441c-abeb-a7f17b0cb31f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.299784 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv" event={"ID":"7e4edc52-6ba0-441c-abeb-a7f17b0cb31f","Type":"ContainerDied","Data":"99b61fde453ce5459569bd3ba9260232b3d5c6beeef3d441bf3b20af33455f07"} Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.300111 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99b61fde453ce5459569bd3ba9260232b3d5c6beeef3d441bf3b20af33455f07" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.299887 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-tf4fv" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.380290 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b"] Jan 20 16:55:24 crc kubenswrapper[4995]: E0120 16:55:24.381020 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.381068 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.381762 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e4edc52-6ba0-441c-abeb-a7f17b0cb31f" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.383066 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.385568 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.385764 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.385894 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.390591 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.395366 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b"] Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.517278 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.517416 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.517501 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.517616 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44m8j\" (UniqueName: \"kubernetes.io/projected/7426fa32-40ee-4b5e-9d5a-962505929c91-kube-api-access-44m8j\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.625382 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.625631 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44m8j\" (UniqueName: 
\"kubernetes.io/projected/7426fa32-40ee-4b5e-9d5a-962505929c91-kube-api-access-44m8j\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.626111 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.626367 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.639156 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.644983 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.659358 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.659752 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44m8j\" (UniqueName: \"kubernetes.io/projected/7426fa32-40ee-4b5e-9d5a-962505929c91-kube-api-access-44m8j\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-g484b\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:55:24 crc kubenswrapper[4995]: I0120 16:55:24.715439 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 16:55:25 crc kubenswrapper[4995]: I0120 16:55:25.218354 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b"]
Jan 20 16:55:25 crc kubenswrapper[4995]: I0120 16:55:25.312681 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" event={"ID":"7426fa32-40ee-4b5e-9d5a-962505929c91","Type":"ContainerStarted","Data":"c4246434a3a3c8d499c286d6bbf90a5b8a684a293db4eaec34acdda51b6960c7"}
Jan 20 16:55:26 crc kubenswrapper[4995]: I0120 16:55:26.326265 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" event={"ID":"7426fa32-40ee-4b5e-9d5a-962505929c91","Type":"ContainerStarted","Data":"f53dd95ce9dcac7036479133b31625c82284cea3893b3849b9ab93e130e77db0"}
Jan 20 16:55:26 crc kubenswrapper[4995]: I0120 16:55:26.361880 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" podStartSLOduration=1.819131288 podStartE2EDuration="2.361848141s" podCreationTimestamp="2026-01-20 16:55:24 +0000 UTC" firstStartedPulling="2026-01-20 16:55:25.228189732 +0000 UTC m=+1443.472794558" lastFinishedPulling="2026-01-20 16:55:25.770906595 +0000 UTC m=+1444.015511411" observedRunningTime="2026-01-20 16:55:26.352119048 +0000 UTC m=+1444.596723864" watchObservedRunningTime="2026-01-20 16:55:26.361848141 +0000 UTC m=+1444.606452967"
Jan 20 16:55:30 crc kubenswrapper[4995]: I0120 16:55:30.294032 4995 scope.go:117] "RemoveContainer" containerID="3cac2fdf7109f5ce096e604dffc348e8c542529f192af67a63494176e5f22513"
Jan 20 16:55:30 crc kubenswrapper[4995]: I0120 16:55:30.334594 4995 scope.go:117] "RemoveContainer" containerID="53e8069b443f272a81a3258a117d49234e434225f6460e754babbea2299b9eca"
Jan 20 16:55:30 crc kubenswrapper[4995]: I0120 16:55:30.415749 4995 scope.go:117] "RemoveContainer" containerID="c6deeff9c8a9e9703bdcba95dd92ae95ee07ee540ee18aaeae513b1d738df8ec"
Jan 20 16:55:30 crc kubenswrapper[4995]: I0120 16:55:30.447188 4995 scope.go:117] "RemoveContainer" containerID="d3c51858ec15fcb97d24111291e64144788ada63c222c79602b2e2ae69d0004f"
Jan 20 16:56:00 crc kubenswrapper[4995]: I0120 16:56:00.571527 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 16:56:00 crc kubenswrapper[4995]: I0120 16:56:00.572244 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.572069 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.572753 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.672774 4995 scope.go:117] "RemoveContainer" containerID="eec45b2cea5651bc2a81854648aa2f0a8fd3c0b130b5982c1fee687be4f46b9f"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.812159 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rcb82"]
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.817377 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.831931 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rcb82"]
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.876303 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96c8f\" (UniqueName: \"kubernetes.io/projected/6f9f5b30-6ffb-433f-a300-b28d79d6d964-kube-api-access-96c8f\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.876384 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-catalog-content\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.876732 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.978671 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96c8f\" (UniqueName: \"kubernetes.io/projected/6f9f5b30-6ffb-433f-a300-b28d79d6d964-kube-api-access-96c8f\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.978752 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-catalog-content\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.978831 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.979393 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82"
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:30 crc kubenswrapper[4995]: I0120 16:56:30.979410 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-catalog-content\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:31 crc kubenswrapper[4995]: I0120 16:56:31.007909 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96c8f\" (UniqueName: \"kubernetes.io/projected/6f9f5b30-6ffb-433f-a300-b28d79d6d964-kube-api-access-96c8f\") pod \"community-operators-rcb82\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:31 crc kubenswrapper[4995]: I0120 16:56:31.141824 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:31 crc kubenswrapper[4995]: I0120 16:56:31.833805 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rcb82"] Jan 20 16:56:32 crc kubenswrapper[4995]: I0120 16:56:32.076357 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerStarted","Data":"4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794"} Jan 20 16:56:32 crc kubenswrapper[4995]: I0120 16:56:32.076471 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerStarted","Data":"689b07f13686962d1d911862c730337158aaaba15b81fb95e20f01c7a7ed1a76"} Jan 20 16:56:33 crc kubenswrapper[4995]: I0120 16:56:33.086905 4995 generic.go:334] "Generic (PLEG): container finished" podID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerID="4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794" exitCode=0 Jan 20 16:56:33 crc kubenswrapper[4995]: I0120 16:56:33.087327 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerDied","Data":"4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794"} Jan 20 16:56:35 crc kubenswrapper[4995]: I0120 16:56:35.112143 4995 generic.go:334] "Generic (PLEG): container finished" podID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerID="8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b" exitCode=0 Jan 20 16:56:35 crc kubenswrapper[4995]: I0120 16:56:35.112183 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerDied","Data":"8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b"} Jan 20 16:56:36 crc kubenswrapper[4995]: I0120 16:56:36.123858 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerStarted","Data":"ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a"} Jan 20 16:56:36 crc 
kubenswrapper[4995]: I0120 16:56:36.147474 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rcb82" podStartSLOduration=3.565244645 podStartE2EDuration="6.147452039s" podCreationTimestamp="2026-01-20 16:56:30 +0000 UTC" firstStartedPulling="2026-01-20 16:56:33.089967738 +0000 UTC m=+1511.334572554" lastFinishedPulling="2026-01-20 16:56:35.672175112 +0000 UTC m=+1513.916779948" observedRunningTime="2026-01-20 16:56:36.145403474 +0000 UTC m=+1514.390008280" watchObservedRunningTime="2026-01-20 16:56:36.147452039 +0000 UTC m=+1514.392056845" Jan 20 16:56:41 crc kubenswrapper[4995]: I0120 16:56:41.142906 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:41 crc kubenswrapper[4995]: I0120 16:56:41.143566 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:41 crc kubenswrapper[4995]: I0120 16:56:41.229560 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:41 crc kubenswrapper[4995]: I0120 16:56:41.336929 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:41 crc kubenswrapper[4995]: I0120 16:56:41.528568 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rcb82"] Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.205668 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rcb82" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="registry-server" containerID="cri-o://ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a" gracePeriod=2 Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.774392 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.865819 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96c8f\" (UniqueName: \"kubernetes.io/projected/6f9f5b30-6ffb-433f-a300-b28d79d6d964-kube-api-access-96c8f\") pod \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.866049 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities\") pod \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.866165 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-catalog-content\") pod \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\" (UID: \"6f9f5b30-6ffb-433f-a300-b28d79d6d964\") " Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.867174 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities" (OuterVolumeSpecName: "utilities") pod "6f9f5b30-6ffb-433f-a300-b28d79d6d964" (UID: "6f9f5b30-6ffb-433f-a300-b28d79d6d964"). 
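The "Killing container with a grace period ... gracePeriod=2" entry above means the runtime sends registry-server its termination signal and waits at most two seconds before a hard kill; the ContainerDied event that follows shows it exited within the window. The classic userspace shape of that escalation, sketched here against a plain os.Process rather than the CRI (Unix-only, since it signals with SIGTERM):

    package main

    import (
    	"fmt"
    	"os"
    	"os/exec"
    	"syscall"
    	"time"
    )

    // stopWithGrace sends SIGTERM, waits up to grace, then SIGKILLs,
    // mirroring how a gracePeriod expiry escalates to a hard kill.
    func stopWithGrace(p *os.Process, done <-chan error, grace time.Duration) {
    	_ = p.Signal(syscall.SIGTERM)
    	select {
    	case <-done: // exited within the grace period
    	case <-time.After(grace):
    		_ = p.Kill() // escalate
    	}
    }

    func main() {
    	cmd := exec.Command("sleep", "60")
    	if err := cmd.Start(); err != nil {
    		fmt.Println(err)
    		return
    	}
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()
    	stopWithGrace(cmd.Process, done, 2*time.Second)
    }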
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.875264 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f9f5b30-6ffb-433f-a300-b28d79d6d964-kube-api-access-96c8f" (OuterVolumeSpecName: "kube-api-access-96c8f") pod "6f9f5b30-6ffb-433f-a300-b28d79d6d964" (UID: "6f9f5b30-6ffb-433f-a300-b28d79d6d964"). InnerVolumeSpecName "kube-api-access-96c8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.931587 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f9f5b30-6ffb-433f-a300-b28d79d6d964" (UID: "6f9f5b30-6ffb-433f-a300-b28d79d6d964"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.968459 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.968498 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f9f5b30-6ffb-433f-a300-b28d79d6d964-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 16:56:43 crc kubenswrapper[4995]: I0120 16:56:43.968511 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96c8f\" (UniqueName: \"kubernetes.io/projected/6f9f5b30-6ffb-433f-a300-b28d79d6d964-kube-api-access-96c8f\") on node \"crc\" DevicePath \"\"" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.217616 4995 generic.go:334] "Generic (PLEG): container finished" podID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerID="ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a" exitCode=0 Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.217665 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rcb82" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.217678 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerDied","Data":"ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a"} Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.218010 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rcb82" event={"ID":"6f9f5b30-6ffb-433f-a300-b28d79d6d964","Type":"ContainerDied","Data":"689b07f13686962d1d911862c730337158aaaba15b81fb95e20f01c7a7ed1a76"} Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.218057 4995 scope.go:117] "RemoveContainer" containerID="ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.252490 4995 scope.go:117] "RemoveContainer" containerID="8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.255924 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rcb82"] Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.268480 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rcb82"] Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.285020 4995 scope.go:117] "RemoveContainer" containerID="4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.350673 4995 scope.go:117] "RemoveContainer" containerID="ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a" Jan 20 16:56:44 crc kubenswrapper[4995]: E0120 16:56:44.351437 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a\": container with ID starting with ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a not found: ID does not exist" containerID="ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.351616 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a"} err="failed to get container status \"ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a\": rpc error: code = NotFound desc = could not find container \"ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a\": container with ID starting with ac1234b664594c1134a854185b8cb5aec426bc1e6b3f259d14137b7a7f80982a not found: ID does not exist" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.351751 4995 scope.go:117] "RemoveContainer" containerID="8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b" Jan 20 16:56:44 crc kubenswrapper[4995]: E0120 16:56:44.352244 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b\": container with ID starting with 8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b not found: ID does not exist" containerID="8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.352376 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b"} err="failed to get container status \"8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b\": rpc error: code = NotFound desc = could not find container \"8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b\": container with ID starting with 8b8c8753f408461c588f1220295e294034bf9bb16c1cc8b2e70235eedcd92e8b not found: ID does not exist" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.352487 4995 scope.go:117] "RemoveContainer" containerID="4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794" Jan 20 16:56:44 crc kubenswrapper[4995]: E0120 16:56:44.352915 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794\": container with ID starting with 4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794 not found: ID does not exist" containerID="4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794" Jan 20 16:56:44 crc kubenswrapper[4995]: I0120 16:56:44.353037 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794"} err="failed to get container status \"4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794\": rpc error: code = NotFound desc = could not find container \"4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794\": container with ID starting with 4583b7c966c505cf3c415bd43f69f74040c2c1ef2e3f229a0aac4284159fb794 not found: ID does not exist" Jan 20 16:56:46 crc kubenswrapper[4995]: I0120 16:56:46.002823 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" path="/var/lib/kubelet/pods/6f9f5b30-6ffb-433f-a300-b28d79d6d964/volumes" Jan 20 16:57:00 crc kubenswrapper[4995]: I0120 16:57:00.571192 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 16:57:00 crc kubenswrapper[4995]: I0120 16:57:00.571781 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 16:57:00 crc kubenswrapper[4995]: I0120 16:57:00.571824 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 16:57:00 crc kubenswrapper[4995]: I0120 16:57:00.572523 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 16:57:00 crc kubenswrapper[4995]: I0120 16:57:00.572580 4995 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" gracePeriod=600 Jan 20 16:57:00 crc kubenswrapper[4995]: E0120 16:57:00.700325 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:57:01 crc kubenswrapper[4995]: I0120 16:57:01.429871 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" exitCode=0 Jan 20 16:57:01 crc kubenswrapper[4995]: I0120 16:57:01.430127 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"} Jan 20 16:57:01 crc kubenswrapper[4995]: I0120 16:57:01.430368 4995 scope.go:117] "RemoveContainer" containerID="e4ec06a3af3d63376517d75d9eacbb252d52f03f8933ff215b7181152846db60" Jan 20 16:57:01 crc kubenswrapper[4995]: I0120 16:57:01.431004 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:57:01 crc kubenswrapper[4995]: E0120 16:57:01.431315 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:57:13 crc kubenswrapper[4995]: I0120 16:57:13.989659 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:57:13 crc kubenswrapper[4995]: E0120 16:57:13.990741 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:57:26 crc kubenswrapper[4995]: I0120 16:57:26.991394 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:57:26 crc kubenswrapper[4995]: E0120 16:57:26.992946 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:57:40 crc 
kubenswrapper[4995]: I0120 16:57:40.989351 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:57:40 crc kubenswrapper[4995]: E0120 16:57:40.990157 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:57:52 crc kubenswrapper[4995]: I0120 16:57:52.989924 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:57:52 crc kubenswrapper[4995]: E0120 16:57:52.990674 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:58:06 crc kubenswrapper[4995]: I0120 16:58:06.990032 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:58:06 crc kubenswrapper[4995]: E0120 16:58:06.992404 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:58:19 crc kubenswrapper[4995]: I0120 16:58:19.990062 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:58:19 crc kubenswrapper[4995]: E0120 16:58:19.990785 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:58:31 crc kubenswrapper[4995]: I0120 16:58:31.996462 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:58:31 crc kubenswrapper[4995]: E0120 16:58:31.997320 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:58:44 crc kubenswrapper[4995]: I0120 16:58:44.989589 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:58:44 crc 
kubenswrapper[4995]: E0120 16:58:44.991352 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:58:54 crc kubenswrapper[4995]: I0120 16:58:54.741519 4995 generic.go:334] "Generic (PLEG): container finished" podID="7426fa32-40ee-4b5e-9d5a-962505929c91" containerID="f53dd95ce9dcac7036479133b31625c82284cea3893b3849b9ab93e130e77db0" exitCode=0 Jan 20 16:58:54 crc kubenswrapper[4995]: I0120 16:58:54.741890 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" event={"ID":"7426fa32-40ee-4b5e-9d5a-962505929c91","Type":"ContainerDied","Data":"f53dd95ce9dcac7036479133b31625c82284cea3893b3849b9ab93e130e77db0"} Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.150388 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.264229 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-ssh-key-openstack-edpm-ipam\") pod \"7426fa32-40ee-4b5e-9d5a-962505929c91\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.264350 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-bootstrap-combined-ca-bundle\") pod \"7426fa32-40ee-4b5e-9d5a-962505929c91\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.264427 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-inventory\") pod \"7426fa32-40ee-4b5e-9d5a-962505929c91\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.264474 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44m8j\" (UniqueName: \"kubernetes.io/projected/7426fa32-40ee-4b5e-9d5a-962505929c91-kube-api-access-44m8j\") pod \"7426fa32-40ee-4b5e-9d5a-962505929c91\" (UID: \"7426fa32-40ee-4b5e-9d5a-962505929c91\") " Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.270245 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7426fa32-40ee-4b5e-9d5a-962505929c91-kube-api-access-44m8j" (OuterVolumeSpecName: "kube-api-access-44m8j") pod "7426fa32-40ee-4b5e-9d5a-962505929c91" (UID: "7426fa32-40ee-4b5e-9d5a-962505929c91"). InnerVolumeSpecName "kube-api-access-44m8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.270327 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "7426fa32-40ee-4b5e-9d5a-962505929c91" (UID: "7426fa32-40ee-4b5e-9d5a-962505929c91"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.292446 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-inventory" (OuterVolumeSpecName: "inventory") pod "7426fa32-40ee-4b5e-9d5a-962505929c91" (UID: "7426fa32-40ee-4b5e-9d5a-962505929c91"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.294704 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7426fa32-40ee-4b5e-9d5a-962505929c91" (UID: "7426fa32-40ee-4b5e-9d5a-962505929c91"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.366607 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.366822 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44m8j\" (UniqueName: \"kubernetes.io/projected/7426fa32-40ee-4b5e-9d5a-962505929c91-kube-api-access-44m8j\") on node \"crc\" DevicePath \"\"" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.366891 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.366952 4995 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7426fa32-40ee-4b5e-9d5a-962505929c91-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.764156 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" event={"ID":"7426fa32-40ee-4b5e-9d5a-962505929c91","Type":"ContainerDied","Data":"c4246434a3a3c8d499c286d6bbf90a5b8a684a293db4eaec34acdda51b6960c7"} Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.764198 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-g484b" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.764200 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4246434a3a3c8d499c286d6bbf90a5b8a684a293db4eaec34acdda51b6960c7" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.943840 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq"] Jan 20 16:58:56 crc kubenswrapper[4995]: E0120 16:58:56.945598 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="extract-content" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.945622 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="extract-content" Jan 20 16:58:56 crc kubenswrapper[4995]: E0120 16:58:56.945652 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="registry-server" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.945661 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="registry-server" Jan 20 16:58:56 crc kubenswrapper[4995]: E0120 16:58:56.945691 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="extract-utilities" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.945701 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="extract-utilities" Jan 20 16:58:56 crc kubenswrapper[4995]: E0120 16:58:56.945711 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7426fa32-40ee-4b5e-9d5a-962505929c91" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.945720 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7426fa32-40ee-4b5e-9d5a-962505929c91" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.945955 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f9f5b30-6ffb-433f-a300-b28d79d6d964" containerName="registry-server" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.945990 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="7426fa32-40ee-4b5e-9d5a-962505929c91" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.946756 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq"] Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.946849 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.954580 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.954641 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.954894 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.955027 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.977278 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.977340 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l2tl\" (UniqueName: \"kubernetes.io/projected/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-kube-api-access-4l2tl\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:56 crc kubenswrapper[4995]: I0120 16:58:56.977362 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.078864 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.079234 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l2tl\" (UniqueName: \"kubernetes.io/projected/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-kube-api-access-4l2tl\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.079356 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.083128 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.083144 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.098769 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4l2tl\" (UniqueName: \"kubernetes.io/projected/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-kube-api-access-4l2tl\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.271746 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.807621 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.815630 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq"] Jan 20 16:58:57 crc kubenswrapper[4995]: I0120 16:58:57.989873 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:58:57 crc kubenswrapper[4995]: E0120 16:58:57.990177 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:58:58 crc kubenswrapper[4995]: I0120 16:58:58.784267 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" event={"ID":"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890","Type":"ContainerStarted","Data":"28c0d1adf546dc43447e6dd05af5eb3f5858c975bfb50bf468c78beedf138144"} Jan 20 16:58:58 crc kubenswrapper[4995]: I0120 16:58:58.784522 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" event={"ID":"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890","Type":"ContainerStarted","Data":"360c426c78db77a5597e3ea005f06b7ec30052f923c7319a7f1fe73fe44866eb"} Jan 20 16:58:58 crc kubenswrapper[4995]: I0120 16:58:58.799512 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" 
podStartSLOduration=2.239783321 podStartE2EDuration="2.799495273s" podCreationTimestamp="2026-01-20 16:58:56 +0000 UTC" firstStartedPulling="2026-01-20 16:58:57.807414165 +0000 UTC m=+1656.052018971" lastFinishedPulling="2026-01-20 16:58:58.367126107 +0000 UTC m=+1656.611730923" observedRunningTime="2026-01-20 16:58:58.796549012 +0000 UTC m=+1657.041153818" watchObservedRunningTime="2026-01-20 16:58:58.799495273 +0000 UTC m=+1657.044100079" Jan 20 16:59:01 crc kubenswrapper[4995]: I0120 16:59:01.043029 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-195b-account-create-update-v2pzx"] Jan 20 16:59:01 crc kubenswrapper[4995]: I0120 16:59:01.057822 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-77d5-account-create-update-pws59"] Jan 20 16:59:01 crc kubenswrapper[4995]: I0120 16:59:01.067648 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-195b-account-create-update-v2pzx"] Jan 20 16:59:01 crc kubenswrapper[4995]: I0120 16:59:01.077002 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-77d5-account-create-update-pws59"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.006586 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51b9bb44-7353-47df-995b-88a44aed4e12" path="/var/lib/kubelet/pods/51b9bb44-7353-47df-995b-88a44aed4e12/volumes" Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.007563 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8806da75-604e-462a-a582-bd0446c83f09" path="/var/lib/kubelet/pods/8806da75-604e-462a-a582-bd0446c83f09/volumes" Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.033822 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-7759-account-create-update-mf5qn"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.042671 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-sv4j2"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.053785 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-72pdn"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.070003 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-j56cw"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.072039 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-72pdn"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.079901 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-7759-account-create-update-mf5qn"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.089788 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-sv4j2"] Jan 20 16:59:02 crc kubenswrapper[4995]: I0120 16:59:02.098847 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-j56cw"] Jan 20 16:59:04 crc kubenswrapper[4995]: I0120 16:59:04.007456 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30229159-0c2b-429a-997f-647d3398832f" path="/var/lib/kubelet/pods/30229159-0c2b-429a-997f-647d3398832f/volumes" Jan 20 16:59:04 crc kubenswrapper[4995]: I0120 16:59:04.008967 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eceda26-e546-4f1c-b611-18056b30d199" path="/var/lib/kubelet/pods/8eceda26-e546-4f1c-b611-18056b30d199/volumes" Jan 20 16:59:04 crc kubenswrapper[4995]: I0120 16:59:04.009759 4995 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9127f5ea-7402-4155-b70f-2a4d382598ec" path="/var/lib/kubelet/pods/9127f5ea-7402-4155-b70f-2a4d382598ec/volumes" Jan 20 16:59:04 crc kubenswrapper[4995]: I0120 16:59:04.010510 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305" path="/var/lib/kubelet/pods/c4bdf4e5-1255-42cd-89b1-d3f1e2ff4305/volumes" Jan 20 16:59:06 crc kubenswrapper[4995]: I0120 16:59:06.050892 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-5sbj9"] Jan 20 16:59:06 crc kubenswrapper[4995]: I0120 16:59:06.065623 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-c670-account-create-update-nkw7r"] Jan 20 16:59:06 crc kubenswrapper[4995]: I0120 16:59:06.076581 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-5sbj9"] Jan 20 16:59:06 crc kubenswrapper[4995]: I0120 16:59:06.086949 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-c670-account-create-update-nkw7r"] Jan 20 16:59:08 crc kubenswrapper[4995]: I0120 16:59:08.007313 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08e0de87-a951-4bae-9915-9bbc5bf7ece5" path="/var/lib/kubelet/pods/08e0de87-a951-4bae-9915-9bbc5bf7ece5/volumes" Jan 20 16:59:08 crc kubenswrapper[4995]: I0120 16:59:08.008874 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5be9b31-4928-4381-b9df-6dbbbb20ca0c" path="/var/lib/kubelet/pods/d5be9b31-4928-4381-b9df-6dbbbb20ca0c/volumes" Jan 20 16:59:08 crc kubenswrapper[4995]: I0120 16:59:08.990561 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:59:08 crc kubenswrapper[4995]: E0120 16:59:08.990924 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:59:22 crc kubenswrapper[4995]: I0120 16:59:22.989910 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:59:22 crc kubenswrapper[4995]: E0120 16:59:22.990700 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.053256 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-p9tsn"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.066186 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-h9j24"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.079477 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-p9tsn"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.088481 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/neutron-07ce-account-create-update-gkng9"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.096354 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-0ab3-account-create-update-rspxp"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.104833 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-h9j24"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.113740 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-07ce-account-create-update-gkng9"] Jan 20 16:59:29 crc kubenswrapper[4995]: I0120 16:59:29.122662 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-0ab3-account-create-update-rspxp"] Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.007202 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="555925e6-eb4f-4c45-9151-f44a6fee3874" path="/var/lib/kubelet/pods/555925e6-eb4f-4c45-9151-f44a6fee3874/volumes" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.007903 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a97f544e-7335-4e51-9bfd-9c92bcd12cc6" path="/var/lib/kubelet/pods/a97f544e-7335-4e51-9bfd-9c92bcd12cc6/volumes" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.008567 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baf5a0b0-860c-4777-bc84-f6dc4a17af4c" path="/var/lib/kubelet/pods/baf5a0b0-860c-4777-bc84-f6dc4a17af4c/volumes" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.009259 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="deceef39-8330-4f41-acb1-fbb4ee4f7d80" path="/var/lib/kubelet/pods/deceef39-8330-4f41-acb1-fbb4ee4f7d80/volumes" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.875557 4995 scope.go:117] "RemoveContainer" containerID="c498f7985f5ddf07870c6600a5528663f07a4f083a655568edce4766a64d167c" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.903930 4995 scope.go:117] "RemoveContainer" containerID="7d241ca40d3bf15210984a72e9bdc49d6cbc76aab1e07ac11aa9bcb17e078fcb" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.950570 4995 scope.go:117] "RemoveContainer" containerID="97818658943d0c9c4b4ed1df2b1278e3f8dfe0d282ab3724994c2ec013c7d058" Jan 20 16:59:30 crc kubenswrapper[4995]: I0120 16:59:30.991941 4995 scope.go:117] "RemoveContainer" containerID="fc53de352d119f2763437fd1e330416bdd9e71b2df28d57fb606d2f1091ee0dd" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.041590 4995 scope.go:117] "RemoveContainer" containerID="544504c1fcb266975a10bf1fdcc8d5031f2f78a3bbeb6e0ee2a8d2d603ed8d6c" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.086804 4995 scope.go:117] "RemoveContainer" containerID="38d764a363ff2a902a260a9025f3bd3a1e3cf7e0b118ce276110b78e179a79d1" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.118569 4995 scope.go:117] "RemoveContainer" containerID="f7661154b178ed9c11b44bc425dc43803dd061f2132875a69ac65a72784f2c2d" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.142719 4995 scope.go:117] "RemoveContainer" containerID="c9134720c2b281de70679f3892f95322c74a07648ce04c54c86d962bb59e5c2c" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.161550 4995 scope.go:117] "RemoveContainer" containerID="3a4aba26cb1810f15c420f4f9a2e770d9c9c0522ab039ca5a011f2fb01ccf9c5" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.183243 4995 scope.go:117] "RemoveContainer" containerID="0403347ee00cbd0387e8082ffe31741e537067b2c547b0481045e7732d0c41c0" Jan 20 16:59:31 
crc kubenswrapper[4995]: I0120 16:59:31.200186 4995 scope.go:117] "RemoveContainer" containerID="f4f276489b3e827a855e4ff5a98bda8834902cd61645bbfca799795aca95cc9f" Jan 20 16:59:31 crc kubenswrapper[4995]: I0120 16:59:31.220992 4995 scope.go:117] "RemoveContainer" containerID="359af61b0dac4d84d0461fa96ea32ff311bc8f52a31b1f3804cc6658fb436d17" Jan 20 16:59:35 crc kubenswrapper[4995]: I0120 16:59:35.990317 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:59:35 crc kubenswrapper[4995]: E0120 16:59:35.991284 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:59:36 crc kubenswrapper[4995]: I0120 16:59:36.038311 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-mbl5g"] Jan 20 16:59:36 crc kubenswrapper[4995]: I0120 16:59:36.050916 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-ee38-account-create-update-tg46v"] Jan 20 16:59:36 crc kubenswrapper[4995]: I0120 16:59:36.059893 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-mbl5g"] Jan 20 16:59:36 crc kubenswrapper[4995]: I0120 16:59:36.068713 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-ee38-account-create-update-tg46v"] Jan 20 16:59:37 crc kubenswrapper[4995]: I0120 16:59:37.036669 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-jdpl9"] Jan 20 16:59:37 crc kubenswrapper[4995]: I0120 16:59:37.046465 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-jdpl9"] Jan 20 16:59:38 crc kubenswrapper[4995]: I0120 16:59:38.001263 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2afb13bf-8898-4ec7-b9f1-036467eec7fd" path="/var/lib/kubelet/pods/2afb13bf-8898-4ec7-b9f1-036467eec7fd/volumes" Jan 20 16:59:38 crc kubenswrapper[4995]: I0120 16:59:38.002472 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ecfbb2f-250c-4484-a20f-f45dce557abc" path="/var/lib/kubelet/pods/6ecfbb2f-250c-4484-a20f-f45dce557abc/volumes" Jan 20 16:59:38 crc kubenswrapper[4995]: I0120 16:59:38.003614 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd799892-ab5d-43c3-aa1e-df5407ed9d7c" path="/var/lib/kubelet/pods/cd799892-ab5d-43c3-aa1e-df5407ed9d7c/volumes" Jan 20 16:59:49 crc kubenswrapper[4995]: I0120 16:59:49.989616 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 16:59:49 crc kubenswrapper[4995]: E0120 16:59:49.991986 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 16:59:50 crc kubenswrapper[4995]: I0120 16:59:50.050672 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/root-account-create-update-f68pw"] Jan 20 16:59:50 crc kubenswrapper[4995]: I0120 16:59:50.062786 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-f68pw"] Jan 20 16:59:51 crc kubenswrapper[4995]: I0120 16:59:51.999514 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="473df9a8-e4eb-44ed-a6c8-2e7c815063ba" path="/var/lib/kubelet/pods/473df9a8-e4eb-44ed-a6c8-2e7c815063ba/volumes" Jan 20 16:59:53 crc kubenswrapper[4995]: I0120 16:59:53.030924 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-j7gvz"] Jan 20 16:59:53 crc kubenswrapper[4995]: I0120 16:59:53.042183 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-j7gvz"] Jan 20 16:59:54 crc kubenswrapper[4995]: I0120 16:59:54.005889 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17f5be4a-fe5c-414f-b2af-3e06500135ba" path="/var/lib/kubelet/pods/17f5be4a-fe5c-414f-b2af-3e06500135ba/volumes" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.158591 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"] Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.161415 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.164456 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.164743 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.173989 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"] Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.270400 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-config-volume\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.271306 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-secret-volume\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.271504 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tzj5\" (UniqueName: \"kubernetes.io/projected/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-kube-api-access-4tzj5\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.373594 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-secret-volume\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.373719 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tzj5\" (UniqueName: \"kubernetes.io/projected/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-kube-api-access-4tzj5\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.373807 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-config-volume\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.375242 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-config-volume\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.389594 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-secret-volume\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.405257 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tzj5\" (UniqueName: \"kubernetes.io/projected/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-kube-api-access-4tzj5\") pod \"collect-profiles-29482140-f7tf9\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.482751 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"
Jan 20 17:00:00 crc kubenswrapper[4995]: I0120 17:00:00.982965 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"]
Jan 20 17:00:01 crc kubenswrapper[4995]: I0120 17:00:01.510022 4995 generic.go:334] "Generic (PLEG): container finished" podID="01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" containerID="a0440b2bc16b41337f9949292a83a62e98dba09f273ab8edcb2663c4f44eff58" exitCode=0
Jan 20 17:00:01 crc kubenswrapper[4995]: I0120 17:00:01.510393 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" event={"ID":"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee","Type":"ContainerDied","Data":"a0440b2bc16b41337f9949292a83a62e98dba09f273ab8edcb2663c4f44eff58"}
Jan 20 17:00:01 crc kubenswrapper[4995]: I0120 17:00:01.510424 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" event={"ID":"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee","Type":"ContainerStarted","Data":"20851c576c5ae7ced42d909c3d49f7376c89915e1f4abcb7a8e5171430433aaa"}
Jan 20 17:00:02 crc kubenswrapper[4995]: I0120 17:00:02.917225 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.032261 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tzj5\" (UniqueName: \"kubernetes.io/projected/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-kube-api-access-4tzj5\") pod \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") "
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.032580 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-config-volume\") pod \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") "
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.032617 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-secret-volume\") pod \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\" (UID: \"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee\") "
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.033164 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-config-volume" (OuterVolumeSpecName: "config-volume") pod "01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" (UID: "01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.033870 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-config-volume\") on node \"crc\" DevicePath \"\""
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.039885 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" (UID: "01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.041303 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-kube-api-access-4tzj5" (OuterVolumeSpecName: "kube-api-access-4tzj5") pod "01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" (UID: "01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee"). InnerVolumeSpecName "kube-api-access-4tzj5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.136429 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.136474 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tzj5\" (UniqueName: \"kubernetes.io/projected/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee-kube-api-access-4tzj5\") on node \"crc\" DevicePath \"\""
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.539068 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9" event={"ID":"01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee","Type":"ContainerDied","Data":"20851c576c5ae7ced42d909c3d49f7376c89915e1f4abcb7a8e5171430433aaa"}
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.539206 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20851c576c5ae7ced42d909c3d49f7376c89915e1f4abcb7a8e5171430433aaa"
Jan 20 17:00:03 crc kubenswrapper[4995]: I0120 17:00:03.539143 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"
Jan 20 17:00:04 crc kubenswrapper[4995]: I0120 17:00:04.279632 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:00:04 crc kubenswrapper[4995]: E0120 17:00:04.280210 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:00:17 crc kubenswrapper[4995]: I0120 17:00:17.990685 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:00:17 crc kubenswrapper[4995]: E0120 17:00:17.991910 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:00:25 crc kubenswrapper[4995]: I0120 17:00:25.045922 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-mzctz"]
Jan 20 17:00:25 crc kubenswrapper[4995]: I0120 17:00:25.054331 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-mzctz"]
Jan 20 17:00:26 crc kubenswrapper[4995]: I0120 17:00:26.008168 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8a61b44-4464-497c-881e-bdc0d9063bd9" path="/var/lib/kubelet/pods/c8a61b44-4464-497c-881e-bdc0d9063bd9/volumes"
Jan 20 17:00:31 crc kubenswrapper[4995]: I0120 17:00:31.454301 4995 scope.go:117] "RemoveContainer" containerID="d2d4f341556f481ad7e5304853ff66e49b89bc2b9ed38f644d8d2401c9ed629c"
Jan 20 17:00:31 crc kubenswrapper[4995]: I0120 17:00:31.575488 4995 scope.go:117] "RemoveContainer" containerID="2a8cf128193a25a11de2bf8c2c472de613c727db9a2407030beaa669f491cede"
Jan 20 17:00:31 crc kubenswrapper[4995]: I0120 17:00:31.612321 4995 scope.go:117] "RemoveContainer" containerID="c86e63d95c4134414cc169f261f0f35f588248a3f48b521d54507b8864a03568"
Jan 20 17:00:31 crc kubenswrapper[4995]: I0120 17:00:31.672094 4995 scope.go:117] "RemoveContainer" containerID="c0596506311edc958891b00a4e638a168c9bd118c6a8a46b534b53a4617491b3"
Jan 20 17:00:31 crc kubenswrapper[4995]: I0120 17:00:31.722947 4995 scope.go:117] "RemoveContainer" containerID="e6ee1ad2b9244080d618f0cad4d19e5883b2c3953baad4040c3c13f12ec62081"
Jan 20 17:00:31 crc kubenswrapper[4995]: I0120 17:00:31.765954 4995 scope.go:117] "RemoveContainer" containerID="81d167fedd3c0d73e6e9e8e54faca62bc2b909076389f6f205cb9d888ab130e9"
Jan 20 17:00:32 crc kubenswrapper[4995]: I0120 17:00:32.007992 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:00:32 crc kubenswrapper[4995]: E0120 17:00:32.008448 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
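[Annotation] The paired "RemoveContainer" / "Error syncing pod" records for machine-config-daemon-ns9m2 repeat every 10-20 seconds above; each sync attempt finds the container still inside its crash-loop backoff window and quotes the same cap, "back-off 5m0s". A minimal Go sketch of the doubling-with-cap restart policy these records imply follows; only the 5m0s ceiling appears in the log, so the 10-second initial delay is an assumption (it is the upstream kubelet default).

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling-with-cap restart backoff, as implied by the repeated
	// "back-off 5m0s restarting failed container" records above.
	delay := 10 * time.Second   // assumed initial delay (upstream kubelet default)
	maxDelay := 5 * time.Minute // the cap quoted verbatim in the log
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("restart %d: wait %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // from here on, every sync reports 5m0s
		}
	}
}

Once the delay saturates at 5m0s the pod worker keeps logging the same error on each sync until the backoff expires; in this log that happens at 17:02:12, and the container is reported started at 17:02:14.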
Jan 20 17:00:38 crc kubenswrapper[4995]: I0120 17:00:38.061069 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-ncfj6"]
Jan 20 17:00:38 crc kubenswrapper[4995]: I0120 17:00:38.071921 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-ncfj6"]
Jan 20 17:00:38 crc kubenswrapper[4995]: I0120 17:00:38.085625 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-4zjlv"]
Jan 20 17:00:38 crc kubenswrapper[4995]: I0120 17:00:38.096760 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-shkpd"]
Jan 20 17:00:38 crc kubenswrapper[4995]: I0120 17:00:38.104141 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-4zjlv"]
Jan 20 17:00:38 crc kubenswrapper[4995]: I0120 17:00:38.111195 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-shkpd"]
Jan 20 17:00:40 crc kubenswrapper[4995]: I0120 17:00:40.010510 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40362747-51a2-473e-845c-3427003b9b7a" path="/var/lib/kubelet/pods/40362747-51a2-473e-845c-3427003b9b7a/volumes"
Jan 20 17:00:40 crc kubenswrapper[4995]: I0120 17:00:40.011457 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5603781-3cf3-41db-bfc7-7dc74d244fd4" path="/var/lib/kubelet/pods/e5603781-3cf3-41db-bfc7-7dc74d244fd4/volumes"
Jan 20 17:00:40 crc kubenswrapper[4995]: I0120 17:00:40.012984 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eabde76f-904d-4313-8013-f17d65cc178f" path="/var/lib/kubelet/pods/eabde76f-904d-4313-8013-f17d65cc178f/volumes"
Jan 20 17:00:45 crc kubenswrapper[4995]: I0120 17:00:45.989827 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:00:45 crc kubenswrapper[4995]: E0120 17:00:45.990849 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:00:48 crc kubenswrapper[4995]: I0120 17:00:48.038507 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-cc4hb"]
Jan 20 17:00:48 crc kubenswrapper[4995]: I0120 17:00:48.049710 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-cc4hb"]
Jan 20 17:00:50 crc kubenswrapper[4995]: I0120 17:00:50.009427 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cb81edf-880e-421a-bc37-258db15b1ad9" path="/var/lib/kubelet/pods/6cb81edf-880e-421a-bc37-258db15b1ad9/volumes"
Jan 20 17:00:51 crc kubenswrapper[4995]: I0120 17:00:51.061942 4995 generic.go:334] "Generic (PLEG): container finished" podID="d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" containerID="28c0d1adf546dc43447e6dd05af5eb3f5858c975bfb50bf468c78beedf138144" exitCode=0
Jan 20 17:00:51 crc kubenswrapper[4995]: I0120 17:00:51.062002 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" event={"ID":"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890","Type":"ContainerDied","Data":"28c0d1adf546dc43447e6dd05af5eb3f5858c975bfb50bf468c78beedf138144"}
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.516701 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq"
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.691388 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l2tl\" (UniqueName: \"kubernetes.io/projected/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-kube-api-access-4l2tl\") pod \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") "
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.691474 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-inventory\") pod \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") "
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.691587 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-ssh-key-openstack-edpm-ipam\") pod \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\" (UID: \"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890\") "
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.697463 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-kube-api-access-4l2tl" (OuterVolumeSpecName: "kube-api-access-4l2tl") pod "d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" (UID: "d73cf0b7-6fb8-4b4c-b6bd-acb174f44890"). InnerVolumeSpecName "kube-api-access-4l2tl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.732971 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-inventory" (OuterVolumeSpecName: "inventory") pod "d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" (UID: "d73cf0b7-6fb8-4b4c-b6bd-acb174f44890"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.743295 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" (UID: "d73cf0b7-6fb8-4b4c-b6bd-acb174f44890"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.794029 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l2tl\" (UniqueName: \"kubernetes.io/projected/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-kube-api-access-4l2tl\") on node \"crc\" DevicePath \"\""
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.794113 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-inventory\") on node \"crc\" DevicePath \"\""
Jan 20 17:00:52 crc kubenswrapper[4995]: I0120 17:00:52.794126 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d73cf0b7-6fb8-4b4c-b6bd-acb174f44890-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.089968 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq" event={"ID":"d73cf0b7-6fb8-4b4c-b6bd-acb174f44890","Type":"ContainerDied","Data":"360c426c78db77a5597e3ea005f06b7ec30052f923c7319a7f1fe73fe44866eb"}
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.090017 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="360c426c78db77a5597e3ea005f06b7ec30052f923c7319a7f1fe73fe44866eb"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.090211 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.171432 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"]
Jan 20 17:00:53 crc kubenswrapper[4995]: E0120 17:00:53.172128 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.172155 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:00:53 crc kubenswrapper[4995]: E0120 17:00:53.172197 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" containerName="collect-profiles"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.172210 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" containerName="collect-profiles"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.172574 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" containerName="collect-profiles"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.172611 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d73cf0b7-6fb8-4b4c-b6bd-acb174f44890" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.173741 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.175619 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.175962 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.176424 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.179265 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.182058 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"]
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.302806 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.302927 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.302967 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5m7n\" (UniqueName: \"kubernetes.io/projected/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-kube-api-access-l5m7n\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.404481 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.404579 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5m7n\" (UniqueName: \"kubernetes.io/projected/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-kube-api-access-l5m7n\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.404710 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.408310 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.408488 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.438063 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5m7n\" (UniqueName: \"kubernetes.io/projected/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-kube-api-access-l5m7n\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-t744h\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:53 crc kubenswrapper[4995]: I0120 17:00:53.504412 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:00:54 crc kubenswrapper[4995]: I0120 17:00:54.052598 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"]
Jan 20 17:00:54 crc kubenswrapper[4995]: W0120 17:00:54.063412 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d2d90c1_a32f_4ec1_82e9_4d4440542e43.slice/crio-4fa68e7b96d734aaa92f4a0a7c4911009c09127a02ca50a8911bbe90df7e3045 WatchSource:0}: Error finding container 4fa68e7b96d734aaa92f4a0a7c4911009c09127a02ca50a8911bbe90df7e3045: Status 404 returned error can't find the container with id 4fa68e7b96d734aaa92f4a0a7c4911009c09127a02ca50a8911bbe90df7e3045
Jan 20 17:00:54 crc kubenswrapper[4995]: I0120 17:00:54.109205 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h" event={"ID":"6d2d90c1-a32f-4ec1-82e9-4d4440542e43","Type":"ContainerStarted","Data":"4fa68e7b96d734aaa92f4a0a7c4911009c09127a02ca50a8911bbe90df7e3045"}
Jan 20 17:00:56 crc kubenswrapper[4995]: I0120 17:00:56.129632 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h" event={"ID":"6d2d90c1-a32f-4ec1-82e9-4d4440542e43","Type":"ContainerStarted","Data":"c834ed707242b2936a40903d385e76bdd066f4b7d160a8cc32461e92335ce299"}
Jan 20 17:00:56 crc kubenswrapper[4995]: I0120 17:00:56.153895 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h" podStartSLOduration=2.137354744 podStartE2EDuration="3.153867352s" podCreationTimestamp="2026-01-20 17:00:53 +0000 UTC" firstStartedPulling="2026-01-20 17:00:54.067483091 +0000 UTC m=+1772.312087927" lastFinishedPulling="2026-01-20 17:00:55.083995729 +0000 UTC m=+1773.328600535" observedRunningTime="2026-01-20 17:00:56.145945258 +0000 UTC m=+1774.390550094" watchObservedRunningTime="2026-01-20 17:00:56.153867352 +0000 UTC m=+1774.398472168"
Jan 20 17:00:58 crc kubenswrapper[4995]: I0120 17:00:58.044725 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-hmlm7"]
Jan 20 17:00:58 crc kubenswrapper[4995]: I0120 17:00:58.053645 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-hmlm7"]
Jan 20 17:00:59 crc kubenswrapper[4995]: I0120 17:00:59.989712 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:00:59 crc kubenswrapper[4995]: E0120 17:00:59.990697 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.002951 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53794c82-829c-4b77-b902-01be2130f0b8" path="/var/lib/kubelet/pods/53794c82-829c-4b77-b902-01be2130f0b8/volumes"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.150175 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29482141-gpst2"]
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.151525 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.160709 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482141-gpst2"]
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.340551 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pw49\" (UniqueName: \"kubernetes.io/projected/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-kube-api-access-6pw49\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.340688 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-fernet-keys\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.340761 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-combined-ca-bundle\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.340822 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-config-data\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.443111 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pw49\" (UniqueName: \"kubernetes.io/projected/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-kube-api-access-6pw49\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.443200 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-fernet-keys\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.443253 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-combined-ca-bundle\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.443329 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-config-data\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.451781 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-combined-ca-bundle\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.452617 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-config-data\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.462892 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-fernet-keys\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.471474 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pw49\" (UniqueName: \"kubernetes.io/projected/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-kube-api-access-6pw49\") pod \"keystone-cron-29482141-gpst2\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") " pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.480116 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:00 crc kubenswrapper[4995]: I0120 17:01:00.940445 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482141-gpst2"]
Jan 20 17:01:01 crc kubenswrapper[4995]: I0120 17:01:01.172577 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482141-gpst2" event={"ID":"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601","Type":"ContainerStarted","Data":"2c2db2183f53302413c942162ea5b123eeacd1fb22189efc5f419b092f24343e"}
Jan 20 17:01:01 crc kubenswrapper[4995]: I0120 17:01:01.174067 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482141-gpst2" event={"ID":"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601","Type":"ContainerStarted","Data":"b2435e9158ccbcb6c181ed5c1b076d2a6fad602a1f92302f3c83b5b4cc9a7095"}
Jan 20 17:01:01 crc kubenswrapper[4995]: I0120 17:01:01.187469 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29482141-gpst2" podStartSLOduration=1.187448229 podStartE2EDuration="1.187448229s" podCreationTimestamp="2026-01-20 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 17:01:01.186869353 +0000 UTC m=+1779.431474189" watchObservedRunningTime="2026-01-20 17:01:01.187448229 +0000 UTC m=+1779.432053035"
Jan 20 17:01:04 crc kubenswrapper[4995]: I0120 17:01:04.198731 4995 generic.go:334] "Generic (PLEG): container finished" podID="f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" containerID="2c2db2183f53302413c942162ea5b123eeacd1fb22189efc5f419b092f24343e" exitCode=0
Jan 20 17:01:04 crc kubenswrapper[4995]: I0120 17:01:04.198774 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482141-gpst2" event={"ID":"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601","Type":"ContainerDied","Data":"2c2db2183f53302413c942162ea5b123eeacd1fb22189efc5f419b092f24343e"}
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.618223 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.766602 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-combined-ca-bundle\") pod \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") "
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.767110 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pw49\" (UniqueName: \"kubernetes.io/projected/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-kube-api-access-6pw49\") pod \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") "
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.767253 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-config-data\") pod \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") "
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.767308 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-fernet-keys\") pod \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\" (UID: \"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601\") "
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.782456 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-kube-api-access-6pw49" (OuterVolumeSpecName: "kube-api-access-6pw49") pod "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" (UID: "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601"). InnerVolumeSpecName "kube-api-access-6pw49". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.783586 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" (UID: "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.803244 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" (UID: "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.825377 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-config-data" (OuterVolumeSpecName: "config-data") pod "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" (UID: "f2050c7c-ffc8-4deb-89d8-f6cc0ee15601"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.870324 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.870371 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pw49\" (UniqueName: \"kubernetes.io/projected/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-kube-api-access-6pw49\") on node \"crc\" DevicePath \"\""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.870384 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 17:01:05 crc kubenswrapper[4995]: I0120 17:01:05.870396 4995 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f2050c7c-ffc8-4deb-89d8-f6cc0ee15601-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 20 17:01:06 crc kubenswrapper[4995]: I0120 17:01:06.222039 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482141-gpst2" event={"ID":"f2050c7c-ffc8-4deb-89d8-f6cc0ee15601","Type":"ContainerDied","Data":"b2435e9158ccbcb6c181ed5c1b076d2a6fad602a1f92302f3c83b5b4cc9a7095"}
Jan 20 17:01:06 crc kubenswrapper[4995]: I0120 17:01:06.222093 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2435e9158ccbcb6c181ed5c1b076d2a6fad602a1f92302f3c83b5b4cc9a7095"
Jan 20 17:01:06 crc kubenswrapper[4995]: I0120 17:01:06.222154 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482141-gpst2"
Jan 20 17:01:10 crc kubenswrapper[4995]: I0120 17:01:10.989878 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:01:10 crc kubenswrapper[4995]: E0120 17:01:10.993113 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:01:22 crc kubenswrapper[4995]: I0120 17:01:22.068579 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-x6v46"]
Jan 20 17:01:22 crc kubenswrapper[4995]: I0120 17:01:22.080590 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-7z5wh"]
Jan 20 17:01:22 crc kubenswrapper[4995]: I0120 17:01:22.089509 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-x6v46"]
Jan 20 17:01:22 crc kubenswrapper[4995]: I0120 17:01:22.097117 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-z4fhw"]
Jan 20 17:01:22 crc kubenswrapper[4995]: I0120 17:01:22.105508 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-7z5wh"]
Jan 20 17:01:22 crc kubenswrapper[4995]: I0120 17:01:22.114107 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-z4fhw"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.041037 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-5951-account-create-update-gj2vm"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.053248 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-d1ac-account-create-update-5v47x"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.075160 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-d1ac-account-create-update-5v47x"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.085528 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-5951-account-create-update-gj2vm"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.122505 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-d336-account-create-update-vv9vm"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.127863 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-d336-account-create-update-vv9vm"]
Jan 20 17:01:23 crc kubenswrapper[4995]: I0120 17:01:23.990542 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:01:23 crc kubenswrapper[4995]: E0120 17:01:23.991282 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:01:24 crc kubenswrapper[4995]: I0120 17:01:24.004879 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="135aaf1f-990f-4d8b-bde7-32f2ddb702b4" path="/var/lib/kubelet/pods/135aaf1f-990f-4d8b-bde7-32f2ddb702b4/volumes"
Jan 20 17:01:24 crc kubenswrapper[4995]: I0120 17:01:24.006325 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="782c8f7d-5d0a-4221-a7e4-73b8aab7d361" path="/var/lib/kubelet/pods/782c8f7d-5d0a-4221-a7e4-73b8aab7d361/volumes"
Jan 20 17:01:24 crc kubenswrapper[4995]: I0120 17:01:24.007474 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="845fe01d-392a-49a0-bfea-0270f2703739" path="/var/lib/kubelet/pods/845fe01d-392a-49a0-bfea-0270f2703739/volumes"
Jan 20 17:01:24 crc kubenswrapper[4995]: I0120 17:01:24.008695 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4c73453-c3fc-46b3-8a8a-ddd134348b8e" path="/var/lib/kubelet/pods/a4c73453-c3fc-46b3-8a8a-ddd134348b8e/volumes"
Jan 20 17:01:24 crc kubenswrapper[4995]: I0120 17:01:24.010983 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db16d488-a0a4-4fd7-8662-c42fcd147308" path="/var/lib/kubelet/pods/db16d488-a0a4-4fd7-8662-c42fcd147308/volumes"
Jan 20 17:01:24 crc kubenswrapper[4995]: I0120 17:01:24.012458 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f27b5167-a8db-434f-95f5-12d03504b42b" path="/var/lib/kubelet/pods/f27b5167-a8db-434f-95f5-12d03504b42b/volumes"
Jan 20 17:01:31 crc kubenswrapper[4995]: I0120 17:01:31.932700 4995 scope.go:117] "RemoveContainer" containerID="3ae81eea5613b74406b7f39c19ae3c6d1485495d5904291e06cf17c9b57a240e"
Jan 20 17:01:31 crc kubenswrapper[4995]: I0120 17:01:31.978203 4995 scope.go:117] "RemoveContainer" containerID="654eb3f017d11bd7f624b2107af79990d8d6e127d9e87cc164d494b5f1cac8b5"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.077608 4995 scope.go:117] "RemoveContainer" containerID="f215ae9fb06e81304fa3500eca8d311d65749c7a2f1e93f9cfcbc6e929427adf"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.130333 4995 scope.go:117] "RemoveContainer" containerID="6737e76540a1b334227e803b83de5ce933e9cdb414ee98a368a83481c3da1d48"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.165045 4995 scope.go:117] "RemoveContainer" containerID="4a0fbaa2aa343e2358ea3cb668e556f59b7a3a76a119c69915aa9ccfa185f3fb"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.209875 4995 scope.go:117] "RemoveContainer" containerID="cc4db8b534ebf1fe6b4679e9246066f633e666f3cf0eb0bc1048bccf4a0334d1"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.251384 4995 scope.go:117] "RemoveContainer" containerID="44c8c76c8ad789f687dc214079804e8eacb6b94af9d374dc7e2f430cdfcff7a4"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.281388 4995 scope.go:117] "RemoveContainer" containerID="d61e58090310c325efb19963312ec469e053d810d08cf0d420d70429a17f9d8b"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.307933 4995 scope.go:117] "RemoveContainer" containerID="9ec7aac2e8e94c48d769d6accde7419a00a7c797d5510c341e670ed955d52888"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.344747 4995 scope.go:117] "RemoveContainer" containerID="29bf869aa9c0b173a588767f8be2c70c46b4d38c9d6f32d66064158343dde75b"
Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.376049 4995 scope.go:117] "RemoveContainer" containerID="32f5af5984a6bd6bbe66712a3b4d52b4fb3364a2f23bacb8fe8ebb7cec35b538"
Jan 20 17:01:36 crc kubenswrapper[4995]: I0120 17:01:36.990929 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:01:36 crc kubenswrapper[4995]: E0120 17:01:36.994999 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:01:47 crc kubenswrapper[4995]: I0120 17:01:47.990437 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:01:47 crc kubenswrapper[4995]: E0120 17:01:47.991276 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:01:59 crc kubenswrapper[4995]: I0120 17:01:59.055131 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5c6sb"]
Jan 20 17:01:59 crc kubenswrapper[4995]: I0120 17:01:59.063914 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-5c6sb"]
Jan 20 17:01:59 crc kubenswrapper[4995]: I0120 17:01:59.990912 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:01:59 crc kubenswrapper[4995]: E0120 17:01:59.991506 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
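[Annotation] The 17:01:31-17:01:32 burst of scope.go:117 "RemoveContainer" records above is the kubelet clearing a batch of exited containers in one sweep. When auditing a log like this, it is handy to pull just those records out; a small Go filter grounded purely in the record layout visible here (klog header followed by key="value" fields) is sketched below. It assumes one record per line, as reflowed in this document, and the input file name is hypothetical.

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Match records like:
	//   Jan 20 17:01:32 crc kubenswrapper[4995]: I0120 17:01:32.077608 4995 scope.go:117] "RemoveContainer" containerID="f215ae9f..."
	// and print the syslog timestamp plus the 64-hex-char container ID.
	re := regexp.MustCompile(`^(\w+ \d+ \S+) .* "RemoveContainer" containerID="([0-9a-f]{64})"`)
	f, err := os.Open("kubelet.log") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // records can exceed the default 64K token size
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%s  %s\n", m[1], m[2])
		}
	}
}

Run against this section, it would list the six removals at 17:00:31, the ten-container sweep at 17:01:31-17:01:32, and the recurring removals of the crash-looping 6fb265dc... container.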
Jan 20 17:02:00 crc kubenswrapper[4995]: I0120 17:02:00.002097 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f580cd7-fea0-4fb0-a858-74e2deb15c87" path="/var/lib/kubelet/pods/6f580cd7-fea0-4fb0-a858-74e2deb15c87/volumes"
Jan 20 17:02:12 crc kubenswrapper[4995]: I0120 17:02:12.989529 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954"
Jan 20 17:02:14 crc kubenswrapper[4995]: I0120 17:02:14.912553 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"2dddbfd37aa100041abe11a85d1d53ea3e6d3d040244c5dbe43296741e29c68b"}
Jan 20 17:02:16 crc kubenswrapper[4995]: I0120 17:02:16.931998 4995 generic.go:334] "Generic (PLEG): container finished" podID="6d2d90c1-a32f-4ec1-82e9-4d4440542e43" containerID="c834ed707242b2936a40903d385e76bdd066f4b7d160a8cc32461e92335ce299" exitCode=0
Jan 20 17:02:16 crc kubenswrapper[4995]: I0120 17:02:16.932571 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h" event={"ID":"6d2d90c1-a32f-4ec1-82e9-4d4440542e43","Type":"ContainerDied","Data":"c834ed707242b2936a40903d385e76bdd066f4b7d160a8cc32461e92335ce299"}
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.387364 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.427001 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5m7n\" (UniqueName: \"kubernetes.io/projected/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-kube-api-access-l5m7n\") pod \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") "
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.427140 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-inventory\") pod \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") "
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.427229 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-ssh-key-openstack-edpm-ipam\") pod \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\" (UID: \"6d2d90c1-a32f-4ec1-82e9-4d4440542e43\") "
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.432488 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-kube-api-access-l5m7n" (OuterVolumeSpecName: "kube-api-access-l5m7n") pod "6d2d90c1-a32f-4ec1-82e9-4d4440542e43" (UID: "6d2d90c1-a32f-4ec1-82e9-4d4440542e43"). InnerVolumeSpecName "kube-api-access-l5m7n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.475066 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-inventory" (OuterVolumeSpecName: "inventory") pod "6d2d90c1-a32f-4ec1-82e9-4d4440542e43" (UID: "6d2d90c1-a32f-4ec1-82e9-4d4440542e43"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.475703 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6d2d90c1-a32f-4ec1-82e9-4d4440542e43" (UID: "6d2d90c1-a32f-4ec1-82e9-4d4440542e43"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.529087 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5m7n\" (UniqueName: \"kubernetes.io/projected/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-kube-api-access-l5m7n\") on node \"crc\" DevicePath \"\""
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.529193 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-inventory\") on node \"crc\" DevicePath \"\""
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.529208 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6d2d90c1-a32f-4ec1-82e9-4d4440542e43-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.955945 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h" event={"ID":"6d2d90c1-a32f-4ec1-82e9-4d4440542e43","Type":"ContainerDied","Data":"4fa68e7b96d734aaa92f4a0a7c4911009c09127a02ca50a8911bbe90df7e3045"}
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.955995 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fa68e7b96d734aaa92f4a0a7c4911009c09127a02ca50a8911bbe90df7e3045"
Jan 20 17:02:18 crc kubenswrapper[4995]: I0120 17:02:18.956376 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-t744h"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.061337 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"]
Jan 20 17:02:19 crc kubenswrapper[4995]: E0120 17:02:19.061800 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" containerName="keystone-cron"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.061824 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" containerName="keystone-cron"
Jan 20 17:02:19 crc kubenswrapper[4995]: E0120 17:02:19.061849 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d2d90c1-a32f-4ec1-82e9-4d4440542e43" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.061859 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d2d90c1-a32f-4ec1-82e9-4d4440542e43" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.062101 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2050c7c-ffc8-4deb-89d8-f6cc0ee15601" containerName="keystone-cron"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.062131 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d2d90c1-a32f-4ec1-82e9-4d4440542e43" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.062986 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.066221 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.067151 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.070444 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.078104 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"]
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.089319 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.141370 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.141604 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.141704 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rd28\" (UniqueName: \"kubernetes.io/projected/78a82208-d087-4194-ab1e-c3df98c3321e-kube-api-access-5rd28\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.243614 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.243956 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rd28\" (UniqueName: \"kubernetes.io/projected/78a82208-d087-4194-ab1e-c3df98c3321e-kube-api-access-5rd28\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.244271 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.252710 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.253729 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.263223 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rd28\" (UniqueName: \"kubernetes.io/projected/78a82208-d087-4194-ab1e-c3df98c3321e-kube-api-access-5rd28\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.388559 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.909465 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"]
Jan 20 17:02:19 crc kubenswrapper[4995]: I0120 17:02:19.974405 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5" event={"ID":"78a82208-d087-4194-ab1e-c3df98c3321e","Type":"ContainerStarted","Data":"23e7339d15b09a906597e607f47d48528ffe84e93bae3b91e61da058239aacc7"}
Jan 20 17:02:20 crc kubenswrapper[4995]: I0120 17:02:20.984303 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5" event={"ID":"78a82208-d087-4194-ab1e-c3df98c3321e","Type":"ContainerStarted","Data":"f077b269dce80a4abf97f953bb2409de108c16750ba8dbcbb1109141fc5b0235"}
Jan 20 17:02:21 crc kubenswrapper[4995]: I0120 17:02:21.002437 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5" podStartSLOduration=1.403155771 podStartE2EDuration="2.002410123s" podCreationTimestamp="2026-01-20 17:02:19 +0000 UTC" firstStartedPulling="2026-01-20 17:02:19.915882939 +0000 UTC m=+1858.160487745" lastFinishedPulling="2026-01-20 17:02:20.515137291 +0000 UTC m=+1858.759742097" observedRunningTime="2026-01-20 17:02:20.999174165 +0000 UTC m=+1859.243778981" watchObservedRunningTime="2026-01-20 17:02:21.002410123 +0000 UTC m=+1859.247014949"
Jan 20 17:02:26 crc kubenswrapper[4995]: I0120 17:02:26.034570 4995 generic.go:334] "Generic (PLEG): container finished" podID="78a82208-d087-4194-ab1e-c3df98c3321e" containerID="f077b269dce80a4abf97f953bb2409de108c16750ba8dbcbb1109141fc5b0235" exitCode=0
Jan 20 17:02:26 crc kubenswrapper[4995]: I0120 17:02:26.034652 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5" event={"ID":"78a82208-d087-4194-ab1e-c3df98c3321e","Type":"ContainerDied","Data":"f077b269dce80a4abf97f953bb2409de108c16750ba8dbcbb1109141fc5b0235"}
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.514982 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.612485 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-inventory\") pod \"78a82208-d087-4194-ab1e-c3df98c3321e\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") "
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.612636 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-ssh-key-openstack-edpm-ipam\") pod \"78a82208-d087-4194-ab1e-c3df98c3321e\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") "
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.612670 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rd28\" (UniqueName: \"kubernetes.io/projected/78a82208-d087-4194-ab1e-c3df98c3321e-kube-api-access-5rd28\") pod \"78a82208-d087-4194-ab1e-c3df98c3321e\" (UID: \"78a82208-d087-4194-ab1e-c3df98c3321e\") "
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.619171 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78a82208-d087-4194-ab1e-c3df98c3321e-kube-api-access-5rd28" (OuterVolumeSpecName: "kube-api-access-5rd28") pod "78a82208-d087-4194-ab1e-c3df98c3321e" (UID: "78a82208-d087-4194-ab1e-c3df98c3321e"). InnerVolumeSpecName "kube-api-access-5rd28". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.642226 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "78a82208-d087-4194-ab1e-c3df98c3321e" (UID: "78a82208-d087-4194-ab1e-c3df98c3321e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.656305 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-inventory" (OuterVolumeSpecName: "inventory") pod "78a82208-d087-4194-ab1e-c3df98c3321e" (UID: "78a82208-d087-4194-ab1e-c3df98c3321e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.714731 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.714769 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rd28\" (UniqueName: \"kubernetes.io/projected/78a82208-d087-4194-ab1e-c3df98c3321e-kube-api-access-5rd28\") on node \"crc\" DevicePath \"\""
Jan 20 17:02:27 crc kubenswrapper[4995]: I0120 17:02:27.714785 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78a82208-d087-4194-ab1e-c3df98c3321e-inventory\") on node \"crc\" DevicePath \"\""
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.067049 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5" event={"ID":"78a82208-d087-4194-ab1e-c3df98c3321e","Type":"ContainerDied","Data":"23e7339d15b09a906597e607f47d48528ffe84e93bae3b91e61da058239aacc7"}
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.067111 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23e7339d15b09a906597e607f47d48528ffe84e93bae3b91e61da058239aacc7"
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.067170 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5"
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.136935 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr"]
Jan 20 17:02:28 crc kubenswrapper[4995]: E0120 17:02:28.138127 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78a82208-d087-4194-ab1e-c3df98c3321e" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.138166 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="78a82208-d087-4194-ab1e-c3df98c3321e" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.138547 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="78a82208-d087-4194-ab1e-c3df98c3321e" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.139999 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.150678 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.150959 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.152214 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.154442 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr"] Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.155357 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.223767 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.224119 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.224250 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnvzn\" (UniqueName: \"kubernetes.io/projected/f6888ab8-4be9-45c2-b50d-46927fd64cba-kube-api-access-tnvzn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.326510 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.326659 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnvzn\" (UniqueName: \"kubernetes.io/projected/f6888ab8-4be9-45c2-b50d-46927fd64cba-kube-api-access-tnvzn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.326744 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-inventory\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.332662 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.335400 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.352877 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnvzn\" (UniqueName: \"kubernetes.io/projected/f6888ab8-4be9-45c2-b50d-46927fd64cba-kube-api-access-tnvzn\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-rnrwr\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:28 crc kubenswrapper[4995]: I0120 17:02:28.469977 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:02:29 crc kubenswrapper[4995]: I0120 17:02:29.111124 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr"] Jan 20 17:02:30 crc kubenswrapper[4995]: I0120 17:02:30.098103 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" event={"ID":"f6888ab8-4be9-45c2-b50d-46927fd64cba","Type":"ContainerStarted","Data":"bae95ffd05faf0e37d3815a14dd718c14ca93b7f646c8d072b4719de81d38df8"} Jan 20 17:02:31 crc kubenswrapper[4995]: I0120 17:02:31.109217 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" event={"ID":"f6888ab8-4be9-45c2-b50d-46927fd64cba","Type":"ContainerStarted","Data":"01c1e2e19017519c1bade5804c242aa0cc79fadf8f8c4c9feeb9eb096a21c44c"} Jan 20 17:02:31 crc kubenswrapper[4995]: I0120 17:02:31.137617 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" podStartSLOduration=2.066020207 podStartE2EDuration="3.137594826s" podCreationTimestamp="2026-01-20 17:02:28 +0000 UTC" firstStartedPulling="2026-01-20 17:02:29.107702844 +0000 UTC m=+1867.352307650" lastFinishedPulling="2026-01-20 17:02:30.179277443 +0000 UTC m=+1868.423882269" observedRunningTime="2026-01-20 17:02:31.128000867 +0000 UTC m=+1869.372605683" watchObservedRunningTime="2026-01-20 17:02:31.137594826 +0000 UTC m=+1869.382199632" Jan 20 17:02:32 crc kubenswrapper[4995]: I0120 17:02:32.592419 4995 scope.go:117] "RemoveContainer" containerID="611c1eed9b3a0628f4c97d98931a8c066cfb894620af90551690dc6e0e45d957" Jan 20 17:02:35 crc kubenswrapper[4995]: I0120 17:02:35.041249 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-c4552"] 
Jan 20 17:02:35 crc kubenswrapper[4995]: I0120 17:02:35.050015 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-c4552"] Jan 20 17:02:36 crc kubenswrapper[4995]: I0120 17:02:36.002330 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10aafbd1-4abd-4f5b-b6bd-975177b3e6e5" path="/var/lib/kubelet/pods/10aafbd1-4abd-4f5b-b6bd-975177b3e6e5/volumes" Jan 20 17:02:36 crc kubenswrapper[4995]: I0120 17:02:36.036376 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qcr9x"] Jan 20 17:02:36 crc kubenswrapper[4995]: I0120 17:02:36.052418 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-qcr9x"] Jan 20 17:02:38 crc kubenswrapper[4995]: I0120 17:02:38.005497 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cf9607f-a24d-47f2-bcde-a777ed54f2a8" path="/var/lib/kubelet/pods/0cf9607f-a24d-47f2-bcde-a777ed54f2a8/volumes" Jan 20 17:03:14 crc kubenswrapper[4995]: I0120 17:03:14.634724 4995 generic.go:334] "Generic (PLEG): container finished" podID="f6888ab8-4be9-45c2-b50d-46927fd64cba" containerID="01c1e2e19017519c1bade5804c242aa0cc79fadf8f8c4c9feeb9eb096a21c44c" exitCode=0 Jan 20 17:03:14 crc kubenswrapper[4995]: I0120 17:03:14.634822 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" event={"ID":"f6888ab8-4be9-45c2-b50d-46927fd64cba","Type":"ContainerDied","Data":"01c1e2e19017519c1bade5804c242aa0cc79fadf8f8c4c9feeb9eb096a21c44c"} Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.070842 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.158689 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-inventory\") pod \"f6888ab8-4be9-45c2-b50d-46927fd64cba\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.158760 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnvzn\" (UniqueName: \"kubernetes.io/projected/f6888ab8-4be9-45c2-b50d-46927fd64cba-kube-api-access-tnvzn\") pod \"f6888ab8-4be9-45c2-b50d-46927fd64cba\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.158988 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-ssh-key-openstack-edpm-ipam\") pod \"f6888ab8-4be9-45c2-b50d-46927fd64cba\" (UID: \"f6888ab8-4be9-45c2-b50d-46927fd64cba\") " Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.168811 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6888ab8-4be9-45c2-b50d-46927fd64cba-kube-api-access-tnvzn" (OuterVolumeSpecName: "kube-api-access-tnvzn") pod "f6888ab8-4be9-45c2-b50d-46927fd64cba" (UID: "f6888ab8-4be9-45c2-b50d-46927fd64cba"). InnerVolumeSpecName "kube-api-access-tnvzn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.188477 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-inventory" (OuterVolumeSpecName: "inventory") pod "f6888ab8-4be9-45c2-b50d-46927fd64cba" (UID: "f6888ab8-4be9-45c2-b50d-46927fd64cba"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.192900 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "f6888ab8-4be9-45c2-b50d-46927fd64cba" (UID: "f6888ab8-4be9-45c2-b50d-46927fd64cba"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.261149 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.261199 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6888ab8-4be9-45c2-b50d-46927fd64cba-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.261217 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnvzn\" (UniqueName: \"kubernetes.io/projected/f6888ab8-4be9-45c2-b50d-46927fd64cba-kube-api-access-tnvzn\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.653015 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" event={"ID":"f6888ab8-4be9-45c2-b50d-46927fd64cba","Type":"ContainerDied","Data":"bae95ffd05faf0e37d3815a14dd718c14ca93b7f646c8d072b4719de81d38df8"} Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.653058 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bae95ffd05faf0e37d3815a14dd718c14ca93b7f646c8d072b4719de81d38df8" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.653096 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-rnrwr" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.753612 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj"] Jan 20 17:03:16 crc kubenswrapper[4995]: E0120 17:03:16.754069 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6888ab8-4be9-45c2-b50d-46927fd64cba" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.754107 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6888ab8-4be9-45c2-b50d-46927fd64cba" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.754341 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6888ab8-4be9-45c2-b50d-46927fd64cba" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.755113 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.757426 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.757673 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.757765 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.760269 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.762257 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj"] Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.872516 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.872657 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ghh7\" (UniqueName: \"kubernetes.io/projected/b46e1f63-68f8-4cb0-835d-5d35ece39037-kube-api-access-5ghh7\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.872685 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.974953 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ghh7\" (UniqueName: \"kubernetes.io/projected/b46e1f63-68f8-4cb0-835d-5d35ece39037-kube-api-access-5ghh7\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.975019 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.975131 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.982929 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:16 crc kubenswrapper[4995]: I0120 17:03:16.983275 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:17 crc kubenswrapper[4995]: I0120 17:03:17.001331 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ghh7\" (UniqueName: \"kubernetes.io/projected/b46e1f63-68f8-4cb0-835d-5d35ece39037-kube-api-access-5ghh7\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-j47qj\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:17 crc kubenswrapper[4995]: I0120 17:03:17.075389 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:03:17 crc kubenswrapper[4995]: I0120 17:03:17.674087 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj"] Jan 20 17:03:18 crc kubenswrapper[4995]: I0120 17:03:18.738605 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" event={"ID":"b46e1f63-68f8-4cb0-835d-5d35ece39037","Type":"ContainerStarted","Data":"b5331d0f9b77792bcc47f884488ccb293d813400b0f9942a87da653a615a846c"} Jan 20 17:03:18 crc kubenswrapper[4995]: I0120 17:03:18.739166 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" event={"ID":"b46e1f63-68f8-4cb0-835d-5d35ece39037","Type":"ContainerStarted","Data":"dd7967d3607cf5465d806342b887a4003f540355f458fd155c3b77260058c991"} Jan 20 17:03:18 crc kubenswrapper[4995]: I0120 17:03:18.780880 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" podStartSLOduration=2.202668974 podStartE2EDuration="2.780858456s" podCreationTimestamp="2026-01-20 17:03:16 +0000 UTC" firstStartedPulling="2026-01-20 17:03:17.686641464 +0000 UTC m=+1915.931246270" lastFinishedPulling="2026-01-20 17:03:18.264830946 +0000 UTC m=+1916.509435752" observedRunningTime="2026-01-20 17:03:18.772685255 +0000 UTC m=+1917.017290071" watchObservedRunningTime="2026-01-20 17:03:18.780858456 +0000 UTC m=+1917.025463262" Jan 20 17:03:21 crc kubenswrapper[4995]: I0120 17:03:21.053733 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-gp9bb"] Jan 20 17:03:21 crc kubenswrapper[4995]: I0120 17:03:21.069900 4995 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-gp9bb"] Jan 20 17:03:22 crc kubenswrapper[4995]: I0120 17:03:22.007689 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f5ec115-81c7-46cc-bf53-b48157c83915" path="/var/lib/kubelet/pods/3f5ec115-81c7-46cc-bf53-b48157c83915/volumes" Jan 20 17:03:33 crc kubenswrapper[4995]: I0120 17:03:33.355392 4995 scope.go:117] "RemoveContainer" containerID="daa09eef2ead9557850538a2a180ef185523c81eb1fcd4785f5965b2917e5edf" Jan 20 17:03:33 crc kubenswrapper[4995]: I0120 17:03:33.435595 4995 scope.go:117] "RemoveContainer" containerID="3aba7627c4bf070ffa879b48848eb4db2271378f8655845f107719e00f1345be" Jan 20 17:03:33 crc kubenswrapper[4995]: I0120 17:03:33.483754 4995 scope.go:117] "RemoveContainer" containerID="2466dc2d36864131fa8bf12c0a882e03e2f357f5041d857fde5e7b5e7ac19837" Jan 20 17:03:39 crc kubenswrapper[4995]: I0120 17:03:39.979029 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2q2tw"] Jan 20 17:03:39 crc kubenswrapper[4995]: I0120 17:03:39.982767 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.005925 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2q2tw"] Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.068358 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-utilities\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.068418 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-catalog-content\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.068750 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbcc7\" (UniqueName: \"kubernetes.io/projected/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-kube-api-access-zbcc7\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.170526 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbcc7\" (UniqueName: \"kubernetes.io/projected/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-kube-api-access-zbcc7\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.170684 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-utilities\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.170718 4995 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-catalog-content\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.171348 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-catalog-content\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.171996 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-utilities\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.194235 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbcc7\" (UniqueName: \"kubernetes.io/projected/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-kube-api-access-zbcc7\") pod \"certified-operators-2q2tw\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.305169 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.807166 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2q2tw"] Jan 20 17:03:40 crc kubenswrapper[4995]: I0120 17:03:40.931906 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2q2tw" event={"ID":"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921","Type":"ContainerStarted","Data":"d63f047de49848588ac931883d06db8a21508067269a2be3c2376557806e0788"} Jan 20 17:03:41 crc kubenswrapper[4995]: I0120 17:03:41.953723 4995 generic.go:334] "Generic (PLEG): container finished" podID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerID="55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2" exitCode=0 Jan 20 17:03:41 crc kubenswrapper[4995]: I0120 17:03:41.953962 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2q2tw" event={"ID":"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921","Type":"ContainerDied","Data":"55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2"} Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.186039 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xshnj"] Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.189677 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.196156 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xshnj"] Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.225555 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-utilities\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.225647 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78bbd\" (UniqueName: \"kubernetes.io/projected/eb8ac5db-4b51-44a7-b558-4e456b545419-kube-api-access-78bbd\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.225692 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-catalog-content\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.327580 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78bbd\" (UniqueName: \"kubernetes.io/projected/eb8ac5db-4b51-44a7-b558-4e456b545419-kube-api-access-78bbd\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.327648 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-catalog-content\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.327747 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-utilities\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.328158 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-utilities\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.328675 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-catalog-content\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.358315 4995 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-78bbd\" (UniqueName: \"kubernetes.io/projected/eb8ac5db-4b51-44a7-b558-4e456b545419-kube-api-access-78bbd\") pod \"redhat-marketplace-xshnj\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.507233 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.978835 4995 generic.go:334] "Generic (PLEG): container finished" podID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerID="658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1" exitCode=0 Jan 20 17:03:43 crc kubenswrapper[4995]: I0120 17:03:43.978919 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2q2tw" event={"ID":"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921","Type":"ContainerDied","Data":"658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1"} Jan 20 17:03:44 crc kubenswrapper[4995]: I0120 17:03:44.044264 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xshnj"] Jan 20 17:03:44 crc kubenswrapper[4995]: I0120 17:03:44.992947 4995 generic.go:334] "Generic (PLEG): container finished" podID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerID="8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce" exitCode=0 Jan 20 17:03:44 crc kubenswrapper[4995]: I0120 17:03:44.993243 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerDied","Data":"8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce"} Jan 20 17:03:44 crc kubenswrapper[4995]: I0120 17:03:44.993482 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerStarted","Data":"ed8aeede436a53f80de32937bd82b45ab1160afde1b5afb559c7fddc90f52b90"} Jan 20 17:03:45 crc kubenswrapper[4995]: I0120 17:03:44.999922 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2q2tw" event={"ID":"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921","Type":"ContainerStarted","Data":"5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848"} Jan 20 17:03:45 crc kubenswrapper[4995]: I0120 17:03:45.047646 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2q2tw" podStartSLOduration=3.478124264 podStartE2EDuration="6.047619364s" podCreationTimestamp="2026-01-20 17:03:39 +0000 UTC" firstStartedPulling="2026-01-20 17:03:41.958044516 +0000 UTC m=+1940.202649332" lastFinishedPulling="2026-01-20 17:03:44.527539616 +0000 UTC m=+1942.772144432" observedRunningTime="2026-01-20 17:03:45.036318328 +0000 UTC m=+1943.280923144" watchObservedRunningTime="2026-01-20 17:03:45.047619364 +0000 UTC m=+1943.292224170" Jan 20 17:03:47 crc kubenswrapper[4995]: I0120 17:03:47.031795 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerStarted","Data":"b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91"} Jan 20 17:03:48 crc kubenswrapper[4995]: I0120 17:03:48.043950 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerID="b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91" exitCode=0 Jan 20 17:03:48 crc kubenswrapper[4995]: I0120 17:03:48.044059 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerDied","Data":"b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91"} Jan 20 17:03:49 crc kubenswrapper[4995]: I0120 17:03:49.054310 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerStarted","Data":"f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7"} Jan 20 17:03:49 crc kubenswrapper[4995]: I0120 17:03:49.081007 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xshnj" podStartSLOduration=2.513083858 podStartE2EDuration="6.080987127s" podCreationTimestamp="2026-01-20 17:03:43 +0000 UTC" firstStartedPulling="2026-01-20 17:03:44.996593853 +0000 UTC m=+1943.241198659" lastFinishedPulling="2026-01-20 17:03:48.564497122 +0000 UTC m=+1946.809101928" observedRunningTime="2026-01-20 17:03:49.070638997 +0000 UTC m=+1947.315243803" watchObservedRunningTime="2026-01-20 17:03:49.080987127 +0000 UTC m=+1947.325591933" Jan 20 17:03:50 crc kubenswrapper[4995]: I0120 17:03:50.307653 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:50 crc kubenswrapper[4995]: I0120 17:03:50.308022 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:50 crc kubenswrapper[4995]: I0120 17:03:50.361402 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:51 crc kubenswrapper[4995]: I0120 17:03:51.142331 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:52 crc kubenswrapper[4995]: I0120 17:03:52.371174 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2q2tw"] Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.087952 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2q2tw" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="registry-server" containerID="cri-o://5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848" gracePeriod=2 Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.508598 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.510105 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.568728 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.609753 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.657239 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-utilities\") pod \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.657666 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-catalog-content\") pod \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.658450 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-utilities" (OuterVolumeSpecName: "utilities") pod "4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" (UID: "4b9fb41b-ae1d-4d1f-9b67-1825f60a9921"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.666229 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbcc7\" (UniqueName: \"kubernetes.io/projected/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-kube-api-access-zbcc7\") pod \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\" (UID: \"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921\") " Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.667308 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.681934 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-kube-api-access-zbcc7" (OuterVolumeSpecName: "kube-api-access-zbcc7") pod "4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" (UID: "4b9fb41b-ae1d-4d1f-9b67-1825f60a9921"). InnerVolumeSpecName "kube-api-access-zbcc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.697367 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" (UID: "4b9fb41b-ae1d-4d1f-9b67-1825f60a9921"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.770629 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:53 crc kubenswrapper[4995]: I0120 17:03:53.770698 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbcc7\" (UniqueName: \"kubernetes.io/projected/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921-kube-api-access-zbcc7\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.112499 4995 generic.go:334] "Generic (PLEG): container finished" podID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerID="5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848" exitCode=0 Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.112572 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2q2tw" event={"ID":"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921","Type":"ContainerDied","Data":"5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848"} Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.112619 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2q2tw" event={"ID":"4b9fb41b-ae1d-4d1f-9b67-1825f60a9921","Type":"ContainerDied","Data":"d63f047de49848588ac931883d06db8a21508067269a2be3c2376557806e0788"} Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.112641 4995 scope.go:117] "RemoveContainer" containerID="5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.112639 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2q2tw" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.142777 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2q2tw"] Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.144863 4995 scope.go:117] "RemoveContainer" containerID="658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.151267 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2q2tw"] Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.169590 4995 scope.go:117] "RemoveContainer" containerID="55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.186555 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.223555 4995 scope.go:117] "RemoveContainer" containerID="5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848" Jan 20 17:03:54 crc kubenswrapper[4995]: E0120 17:03:54.224106 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848\": container with ID starting with 5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848 not found: ID does not exist" containerID="5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.224166 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848"} err="failed to get container status \"5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848\": rpc error: code = NotFound desc = could not find container \"5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848\": container with ID starting with 5037e1c838966393bf30420f046a02bcfeded98d76d32a6656f8c970fdced848 not found: ID does not exist" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.224214 4995 scope.go:117] "RemoveContainer" containerID="658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1" Jan 20 17:03:54 crc kubenswrapper[4995]: E0120 17:03:54.224700 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1\": container with ID starting with 658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1 not found: ID does not exist" containerID="658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.224731 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1"} err="failed to get container status \"658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1\": rpc error: code = NotFound desc = could not find container \"658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1\": container with ID starting with 658d4d691f9e14cfd4ad710335a163a1458b054979c0fa722b084b3118e63ac1 not found: ID does not exist" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.224753 4995 scope.go:117] "RemoveContainer" 
containerID="55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2" Jan 20 17:03:54 crc kubenswrapper[4995]: E0120 17:03:54.225156 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2\": container with ID starting with 55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2 not found: ID does not exist" containerID="55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2" Jan 20 17:03:54 crc kubenswrapper[4995]: I0120 17:03:54.225179 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2"} err="failed to get container status \"55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2\": rpc error: code = NotFound desc = could not find container \"55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2\": container with ID starting with 55a3156e9a184bfb08d4368cb40f542cbb13a2769b99191fa2cc19a82cde75d2 not found: ID does not exist" Jan 20 17:03:56 crc kubenswrapper[4995]: I0120 17:03:56.029888 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" path="/var/lib/kubelet/pods/4b9fb41b-ae1d-4d1f-9b67-1825f60a9921/volumes" Jan 20 17:03:56 crc kubenswrapper[4995]: I0120 17:03:56.170536 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xshnj"] Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.139427 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xshnj" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="registry-server" containerID="cri-o://f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7" gracePeriod=2 Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.554733 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.648126 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-utilities\") pod \"eb8ac5db-4b51-44a7-b558-4e456b545419\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.648232 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78bbd\" (UniqueName: \"kubernetes.io/projected/eb8ac5db-4b51-44a7-b558-4e456b545419-kube-api-access-78bbd\") pod \"eb8ac5db-4b51-44a7-b558-4e456b545419\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.648309 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-catalog-content\") pod \"eb8ac5db-4b51-44a7-b558-4e456b545419\" (UID: \"eb8ac5db-4b51-44a7-b558-4e456b545419\") " Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.649315 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-utilities" (OuterVolumeSpecName: "utilities") pod "eb8ac5db-4b51-44a7-b558-4e456b545419" (UID: "eb8ac5db-4b51-44a7-b558-4e456b545419"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.653970 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb8ac5db-4b51-44a7-b558-4e456b545419-kube-api-access-78bbd" (OuterVolumeSpecName: "kube-api-access-78bbd") pod "eb8ac5db-4b51-44a7-b558-4e456b545419" (UID: "eb8ac5db-4b51-44a7-b558-4e456b545419"). InnerVolumeSpecName "kube-api-access-78bbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.670285 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb8ac5db-4b51-44a7-b558-4e456b545419" (UID: "eb8ac5db-4b51-44a7-b558-4e456b545419"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.750116 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.750146 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb8ac5db-4b51-44a7-b558-4e456b545419-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:57 crc kubenswrapper[4995]: I0120 17:03:57.750160 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78bbd\" (UniqueName: \"kubernetes.io/projected/eb8ac5db-4b51-44a7-b558-4e456b545419-kube-api-access-78bbd\") on node \"crc\" DevicePath \"\"" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.150094 4995 generic.go:334] "Generic (PLEG): container finished" podID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerID="f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7" exitCode=0 Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.150131 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerDied","Data":"f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7"} Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.150157 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xshnj" event={"ID":"eb8ac5db-4b51-44a7-b558-4e456b545419","Type":"ContainerDied","Data":"ed8aeede436a53f80de32937bd82b45ab1160afde1b5afb559c7fddc90f52b90"} Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.150186 4995 scope.go:117] "RemoveContainer" containerID="f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.150208 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xshnj" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.175433 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xshnj"] Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.179709 4995 scope.go:117] "RemoveContainer" containerID="b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.185447 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xshnj"] Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.197403 4995 scope.go:117] "RemoveContainer" containerID="8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.242218 4995 scope.go:117] "RemoveContainer" containerID="f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7" Jan 20 17:03:58 crc kubenswrapper[4995]: E0120 17:03:58.243178 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7\": container with ID starting with f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7 not found: ID does not exist" containerID="f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.243215 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7"} err="failed to get container status \"f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7\": rpc error: code = NotFound desc = could not find container \"f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7\": container with ID starting with f5b31b2ac472ad061fd211ff31173b053c0dc1b4abce830783961f192e8964f7 not found: ID does not exist" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.243242 4995 scope.go:117] "RemoveContainer" containerID="b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91" Jan 20 17:03:58 crc kubenswrapper[4995]: E0120 17:03:58.243741 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91\": container with ID starting with b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91 not found: ID does not exist" containerID="b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.243775 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91"} err="failed to get container status \"b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91\": rpc error: code = NotFound desc = could not find container \"b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91\": container with ID starting with b7f1d10c9ae6b80d8df8168c72fd33fd95211318e3175bae9fea16aac90c7a91 not found: ID does not exist" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.243794 4995 scope.go:117] "RemoveContainer" containerID="8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce" Jan 20 17:03:58 crc kubenswrapper[4995]: E0120 17:03:58.245521 4995 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce\": container with ID starting with 8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce not found: ID does not exist" containerID="8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce" Jan 20 17:03:58 crc kubenswrapper[4995]: I0120 17:03:58.245545 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce"} err="failed to get container status \"8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce\": rpc error: code = NotFound desc = could not find container \"8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce\": container with ID starting with 8d9c02c3e8d580e20a75def9231a7d2f597a55457e9070d1b3a4b4dc85121cce not found: ID does not exist" Jan 20 17:04:00 crc kubenswrapper[4995]: I0120 17:04:00.001823 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" path="/var/lib/kubelet/pods/eb8ac5db-4b51-44a7-b558-4e456b545419/volumes" Jan 20 17:04:17 crc kubenswrapper[4995]: I0120 17:04:17.367292 4995 generic.go:334] "Generic (PLEG): container finished" podID="b46e1f63-68f8-4cb0-835d-5d35ece39037" containerID="b5331d0f9b77792bcc47f884488ccb293d813400b0f9942a87da653a615a846c" exitCode=0 Jan 20 17:04:17 crc kubenswrapper[4995]: I0120 17:04:17.367501 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" event={"ID":"b46e1f63-68f8-4cb0-835d-5d35ece39037","Type":"ContainerDied","Data":"b5331d0f9b77792bcc47f884488ccb293d813400b0f9942a87da653a615a846c"} Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.891937 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.915955 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-ssh-key-openstack-edpm-ipam\") pod \"b46e1f63-68f8-4cb0-835d-5d35ece39037\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.916283 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-inventory\") pod \"b46e1f63-68f8-4cb0-835d-5d35ece39037\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.916531 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ghh7\" (UniqueName: \"kubernetes.io/projected/b46e1f63-68f8-4cb0-835d-5d35ece39037-kube-api-access-5ghh7\") pod \"b46e1f63-68f8-4cb0-835d-5d35ece39037\" (UID: \"b46e1f63-68f8-4cb0-835d-5d35ece39037\") " Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.922877 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b46e1f63-68f8-4cb0-835d-5d35ece39037-kube-api-access-5ghh7" (OuterVolumeSpecName: "kube-api-access-5ghh7") pod "b46e1f63-68f8-4cb0-835d-5d35ece39037" (UID: "b46e1f63-68f8-4cb0-835d-5d35ece39037"). InnerVolumeSpecName "kube-api-access-5ghh7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.965254 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-inventory" (OuterVolumeSpecName: "inventory") pod "b46e1f63-68f8-4cb0-835d-5d35ece39037" (UID: "b46e1f63-68f8-4cb0-835d-5d35ece39037"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:04:18 crc kubenswrapper[4995]: I0120 17:04:18.971181 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b46e1f63-68f8-4cb0-835d-5d35ece39037" (UID: "b46e1f63-68f8-4cb0-835d-5d35ece39037"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.019715 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ghh7\" (UniqueName: \"kubernetes.io/projected/b46e1f63-68f8-4cb0-835d-5d35ece39037-kube-api-access-5ghh7\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.019863 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.020225 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b46e1f63-68f8-4cb0-835d-5d35ece39037-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.398847 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" event={"ID":"b46e1f63-68f8-4cb0-835d-5d35ece39037","Type":"ContainerDied","Data":"dd7967d3607cf5465d806342b887a4003f540355f458fd155c3b77260058c991"} Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.398891 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd7967d3607cf5465d806342b887a4003f540355f458fd155c3b77260058c991" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.398952 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-j47qj" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.504438 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-kr7jc"] Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505025 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="registry-server" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505054 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="registry-server" Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505132 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="extract-content" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505149 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="extract-content" Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505176 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="registry-server" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505188 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="registry-server" Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505214 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="extract-utilities" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505227 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="extract-utilities" Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505254 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="extract-content" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505265 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="extract-content" Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505303 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="extract-utilities" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505315 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="extract-utilities" Jan 20 17:04:19 crc kubenswrapper[4995]: E0120 17:04:19.505335 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b46e1f63-68f8-4cb0-835d-5d35ece39037" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505349 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b46e1f63-68f8-4cb0-835d-5d35ece39037" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505672 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb8ac5db-4b51-44a7-b558-4e456b545419" containerName="registry-server" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.505712 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b9fb41b-ae1d-4d1f-9b67-1825f60a9921" containerName="registry-server" Jan 20 17:04:19 crc 
kubenswrapper[4995]: I0120 17:04:19.505730 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b46e1f63-68f8-4cb0-835d-5d35ece39037" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.506790 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.509477 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.509521 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.509638 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.509801 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.525393 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-kr7jc"] Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.529729 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.529945 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.530015 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkdcx\" (UniqueName: \"kubernetes.io/projected/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-kube-api-access-qkdcx\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.630707 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.630767 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkdcx\" (UniqueName: \"kubernetes.io/projected/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-kube-api-access-qkdcx\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.630858 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.637349 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.640304 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.647647 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkdcx\" (UniqueName: \"kubernetes.io/projected/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-kube-api-access-qkdcx\") pod \"ssh-known-hosts-edpm-deployment-kr7jc\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:19 crc kubenswrapper[4995]: I0120 17:04:19.837986 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:20 crc kubenswrapper[4995]: I0120 17:04:20.206433 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-kr7jc"] Jan 20 17:04:20 crc kubenswrapper[4995]: I0120 17:04:20.206855 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 17:04:20 crc kubenswrapper[4995]: I0120 17:04:20.411010 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" event={"ID":"341c1f6f-c1d4-49a7-8980-7a6f9df0c216","Type":"ContainerStarted","Data":"488a025a177034cae052358b3361bfb8733a333c75dad67ed4a4e4068eb902bc"} Jan 20 17:04:21 crc kubenswrapper[4995]: I0120 17:04:21.422451 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" event={"ID":"341c1f6f-c1d4-49a7-8980-7a6f9df0c216","Type":"ContainerStarted","Data":"bdb17f54f78abad429b3cf4ea42f3739cfe90251342a496b9b74537430ce909a"} Jan 20 17:04:21 crc kubenswrapper[4995]: I0120 17:04:21.453262 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" podStartSLOduration=1.774715983 podStartE2EDuration="2.453238215s" podCreationTimestamp="2026-01-20 17:04:19 +0000 UTC" firstStartedPulling="2026-01-20 17:04:20.20663061 +0000 UTC m=+1978.451235406" lastFinishedPulling="2026-01-20 17:04:20.885152792 +0000 UTC m=+1979.129757638" observedRunningTime="2026-01-20 17:04:21.444760265 +0000 UTC m=+1979.689365071" watchObservedRunningTime="2026-01-20 17:04:21.453238215 +0000 UTC m=+1979.697843031" Jan 20 17:04:29 crc kubenswrapper[4995]: I0120 17:04:29.515236 4995 generic.go:334] "Generic (PLEG): container finished" podID="341c1f6f-c1d4-49a7-8980-7a6f9df0c216" 
containerID="bdb17f54f78abad429b3cf4ea42f3739cfe90251342a496b9b74537430ce909a" exitCode=0 Jan 20 17:04:29 crc kubenswrapper[4995]: I0120 17:04:29.515344 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" event={"ID":"341c1f6f-c1d4-49a7-8980-7a6f9df0c216","Type":"ContainerDied","Data":"bdb17f54f78abad429b3cf4ea42f3739cfe90251342a496b9b74537430ce909a"} Jan 20 17:04:30 crc kubenswrapper[4995]: I0120 17:04:30.571607 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:04:30 crc kubenswrapper[4995]: I0120 17:04:30.572228 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:04:30 crc kubenswrapper[4995]: I0120 17:04:30.988917 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.181020 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkdcx\" (UniqueName: \"kubernetes.io/projected/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-kube-api-access-qkdcx\") pod \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.181108 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-ssh-key-openstack-edpm-ipam\") pod \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.181302 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-inventory-0\") pod \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\" (UID: \"341c1f6f-c1d4-49a7-8980-7a6f9df0c216\") " Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.188896 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-kube-api-access-qkdcx" (OuterVolumeSpecName: "kube-api-access-qkdcx") pod "341c1f6f-c1d4-49a7-8980-7a6f9df0c216" (UID: "341c1f6f-c1d4-49a7-8980-7a6f9df0c216"). InnerVolumeSpecName "kube-api-access-qkdcx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.213801 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "341c1f6f-c1d4-49a7-8980-7a6f9df0c216" (UID: "341c1f6f-c1d4-49a7-8980-7a6f9df0c216"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.214479 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "341c1f6f-c1d4-49a7-8980-7a6f9df0c216" (UID: "341c1f6f-c1d4-49a7-8980-7a6f9df0c216"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.284329 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkdcx\" (UniqueName: \"kubernetes.io/projected/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-kube-api-access-qkdcx\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.284379 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.284403 4995 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/341c1f6f-c1d4-49a7-8980-7a6f9df0c216-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.548048 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" event={"ID":"341c1f6f-c1d4-49a7-8980-7a6f9df0c216","Type":"ContainerDied","Data":"488a025a177034cae052358b3361bfb8733a333c75dad67ed4a4e4068eb902bc"} Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.548104 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="488a025a177034cae052358b3361bfb8733a333c75dad67ed4a4e4068eb902bc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.548285 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-kr7jc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.634164 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc"] Jan 20 17:04:31 crc kubenswrapper[4995]: E0120 17:04:31.634722 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="341c1f6f-c1d4-49a7-8980-7a6f9df0c216" containerName="ssh-known-hosts-edpm-deployment" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.634740 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="341c1f6f-c1d4-49a7-8980-7a6f9df0c216" containerName="ssh-known-hosts-edpm-deployment" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.634980 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="341c1f6f-c1d4-49a7-8980-7a6f9df0c216" containerName="ssh-known-hosts-edpm-deployment" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.635827 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.638586 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.638756 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.639152 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.644656 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.645960 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc"] Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.791466 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.791558 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.792141 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6kkf\" (UniqueName: \"kubernetes.io/projected/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-kube-api-access-j6kkf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.894255 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.894438 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.894530 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6kkf\" (UniqueName: \"kubernetes.io/projected/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-kube-api-access-j6kkf\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.902196 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.902473 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.913495 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6kkf\" (UniqueName: \"kubernetes.io/projected/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-kube-api-access-j6kkf\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-46zxc\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:31 crc kubenswrapper[4995]: I0120 17:04:31.954548 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:32 crc kubenswrapper[4995]: I0120 17:04:32.578747 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc"] Jan 20 17:04:33 crc kubenswrapper[4995]: I0120 17:04:33.569719 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" event={"ID":"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4","Type":"ContainerStarted","Data":"2e36b4acf196835079782340927fa2c410f9db0bbcb3af273cb30bdf119d70d7"} Jan 20 17:04:34 crc kubenswrapper[4995]: I0120 17:04:34.583873 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" event={"ID":"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4","Type":"ContainerStarted","Data":"5b33e300d8b454f52ba71e6de7c74b8064bab8cc50d148ecad941733434a8e27"} Jan 20 17:04:34 crc kubenswrapper[4995]: I0120 17:04:34.604864 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" podStartSLOduration=2.416423702 podStartE2EDuration="3.604842041s" podCreationTimestamp="2026-01-20 17:04:31 +0000 UTC" firstStartedPulling="2026-01-20 17:04:32.579799017 +0000 UTC m=+1990.824403813" lastFinishedPulling="2026-01-20 17:04:33.768217306 +0000 UTC m=+1992.012822152" observedRunningTime="2026-01-20 17:04:34.60371339 +0000 UTC m=+1992.848318216" watchObservedRunningTime="2026-01-20 17:04:34.604842041 +0000 UTC m=+1992.849446847" Jan 20 17:04:43 crc kubenswrapper[4995]: I0120 17:04:43.690230 4995 generic.go:334] "Generic (PLEG): container finished" podID="bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" containerID="5b33e300d8b454f52ba71e6de7c74b8064bab8cc50d148ecad941733434a8e27" exitCode=0 Jan 20 17:04:43 crc kubenswrapper[4995]: I0120 17:04:43.690287 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" event={"ID":"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4","Type":"ContainerDied","Data":"5b33e300d8b454f52ba71e6de7c74b8064bab8cc50d148ecad941733434a8e27"} Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.131764 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.236881 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-ssh-key-openstack-edpm-ipam\") pod \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.237010 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6kkf\" (UniqueName: \"kubernetes.io/projected/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-kube-api-access-j6kkf\") pod \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.237204 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-inventory\") pod \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\" (UID: \"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4\") " Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.247338 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-kube-api-access-j6kkf" (OuterVolumeSpecName: "kube-api-access-j6kkf") pod "bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" (UID: "bcd29c3f-dbeb-439f-98f8-7d4aa39597d4"). InnerVolumeSpecName "kube-api-access-j6kkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.277295 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-inventory" (OuterVolumeSpecName: "inventory") pod "bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" (UID: "bcd29c3f-dbeb-439f-98f8-7d4aa39597d4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.284814 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" (UID: "bcd29c3f-dbeb-439f-98f8-7d4aa39597d4"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.340262 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.340307 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.340322 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6kkf\" (UniqueName: \"kubernetes.io/projected/bcd29c3f-dbeb-439f-98f8-7d4aa39597d4-kube-api-access-j6kkf\") on node \"crc\" DevicePath \"\"" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.713403 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" event={"ID":"bcd29c3f-dbeb-439f-98f8-7d4aa39597d4","Type":"ContainerDied","Data":"2e36b4acf196835079782340927fa2c410f9db0bbcb3af273cb30bdf119d70d7"} Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.713460 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e36b4acf196835079782340927fa2c410f9db0bbcb3af273cb30bdf119d70d7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.713498 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-46zxc" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.841033 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7"] Jan 20 17:04:45 crc kubenswrapper[4995]: E0120 17:04:45.841737 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.841771 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.842274 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcd29c3f-dbeb-439f-98f8-7d4aa39597d4" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.843644 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.853835 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.854328 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.854560 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.854654 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.857311 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.857642 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.858014 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzq7b\" (UniqueName: \"kubernetes.io/projected/152fb470-c7e5-4e8f-86b4-5e816d021183-kube-api-access-bzq7b\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.862674 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7"] Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.960162 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.960292 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.960360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzq7b\" (UniqueName: \"kubernetes.io/projected/152fb470-c7e5-4e8f-86b4-5e816d021183-kube-api-access-bzq7b\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.966017 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.966126 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:45 crc kubenswrapper[4995]: I0120 17:04:45.981800 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzq7b\" (UniqueName: \"kubernetes.io/projected/152fb470-c7e5-4e8f-86b4-5e816d021183-kube-api-access-bzq7b\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:46 crc kubenswrapper[4995]: I0120 17:04:46.170268 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:04:46 crc kubenswrapper[4995]: I0120 17:04:46.751034 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7"] Jan 20 17:04:47 crc kubenswrapper[4995]: I0120 17:04:47.767482 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" event={"ID":"152fb470-c7e5-4e8f-86b4-5e816d021183","Type":"ContainerStarted","Data":"303e2d2fada6c66597e9ae2ff152358cec3b15e13ef658d25becd4486adca22e"} Jan 20 17:04:48 crc kubenswrapper[4995]: I0120 17:04:48.779846 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" event={"ID":"152fb470-c7e5-4e8f-86b4-5e816d021183","Type":"ContainerStarted","Data":"3fe58c67c1ef129f709a8bf046ce601f413c8f7f3223fb898c22041a1b4431bc"} Jan 20 17:04:48 crc kubenswrapper[4995]: I0120 17:04:48.815513 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" podStartSLOduration=2.9023502260000003 podStartE2EDuration="3.815476992s" podCreationTimestamp="2026-01-20 17:04:45 +0000 UTC" firstStartedPulling="2026-01-20 17:04:46.760381825 +0000 UTC m=+2005.004986641" lastFinishedPulling="2026-01-20 17:04:47.673508561 +0000 UTC m=+2005.918113407" observedRunningTime="2026-01-20 17:04:48.802657556 +0000 UTC m=+2007.047262402" watchObservedRunningTime="2026-01-20 17:04:48.815476992 +0000 UTC m=+2007.060081838" Jan 20 17:04:58 crc kubenswrapper[4995]: I0120 17:04:58.883197 4995 generic.go:334] "Generic (PLEG): container finished" podID="152fb470-c7e5-4e8f-86b4-5e816d021183" containerID="3fe58c67c1ef129f709a8bf046ce601f413c8f7f3223fb898c22041a1b4431bc" exitCode=0 Jan 20 17:04:58 crc kubenswrapper[4995]: I0120 17:04:58.883313 4995 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" event={"ID":"152fb470-c7e5-4e8f-86b4-5e816d021183","Type":"ContainerDied","Data":"3fe58c67c1ef129f709a8bf046ce601f413c8f7f3223fb898c22041a1b4431bc"} Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.323487 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.511804 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzq7b\" (UniqueName: \"kubernetes.io/projected/152fb470-c7e5-4e8f-86b4-5e816d021183-kube-api-access-bzq7b\") pod \"152fb470-c7e5-4e8f-86b4-5e816d021183\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.512263 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-ssh-key-openstack-edpm-ipam\") pod \"152fb470-c7e5-4e8f-86b4-5e816d021183\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.512577 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-inventory\") pod \"152fb470-c7e5-4e8f-86b4-5e816d021183\" (UID: \"152fb470-c7e5-4e8f-86b4-5e816d021183\") " Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.520435 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/152fb470-c7e5-4e8f-86b4-5e816d021183-kube-api-access-bzq7b" (OuterVolumeSpecName: "kube-api-access-bzq7b") pod "152fb470-c7e5-4e8f-86b4-5e816d021183" (UID: "152fb470-c7e5-4e8f-86b4-5e816d021183"). InnerVolumeSpecName "kube-api-access-bzq7b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.549564 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-inventory" (OuterVolumeSpecName: "inventory") pod "152fb470-c7e5-4e8f-86b4-5e816d021183" (UID: "152fb470-c7e5-4e8f-86b4-5e816d021183"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.557252 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "152fb470-c7e5-4e8f-86b4-5e816d021183" (UID: "152fb470-c7e5-4e8f-86b4-5e816d021183"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.571415 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.571485 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.615418 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.615454 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzq7b\" (UniqueName: \"kubernetes.io/projected/152fb470-c7e5-4e8f-86b4-5e816d021183-kube-api-access-bzq7b\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.615467 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/152fb470-c7e5-4e8f-86b4-5e816d021183-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.903716 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" event={"ID":"152fb470-c7e5-4e8f-86b4-5e816d021183","Type":"ContainerDied","Data":"303e2d2fada6c66597e9ae2ff152358cec3b15e13ef658d25becd4486adca22e"} Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.903761 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="303e2d2fada6c66597e9ae2ff152358cec3b15e13ef658d25becd4486adca22e" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.903799 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.976126 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z"] Jan 20 17:05:00 crc kubenswrapper[4995]: E0120 17:05:00.976518 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="152fb470-c7e5-4e8f-86b4-5e816d021183" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.976531 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="152fb470-c7e5-4e8f-86b4-5e816d021183" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.976709 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="152fb470-c7e5-4e8f-86b4-5e816d021183" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.977509 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.979512 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.980978 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.981163 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.981289 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.981297 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.981527 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.981572 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.981776 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 20 17:05:00 crc kubenswrapper[4995]: I0120 17:05:00.996997 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z"] Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.124936 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8jdr\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-kube-api-access-j8jdr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.124988 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125025 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125065 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125173 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125210 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125250 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125288 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125334 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125359 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125394 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" 
(UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125422 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125448 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.125469 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226613 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226672 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226715 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226739 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226784 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226814 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226847 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226884 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226912 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226930 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226967 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8jdr\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-kube-api-access-j8jdr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.226992 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.227020 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.227049 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.231758 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.233354 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.233670 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.233924 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.235165 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc 
kubenswrapper[4995]: I0120 17:05:01.235995 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.236124 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.236731 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.236840 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.237332 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.239179 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.243296 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.249027 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.253458 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8jdr\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-kube-api-access-j8jdr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:01 crc kubenswrapper[4995]: I0120 17:05:01.299815 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:02 crc kubenswrapper[4995]: W0120 17:05:02.240620 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8079eec_4ec7_4979_8cd9_531c61418782.slice/crio-37668d951c5625d6a56f1af826b4e389edcd54775ad055e2cc6bad4675ebdb58 WatchSource:0}: Error finding container 37668d951c5625d6a56f1af826b4e389edcd54775ad055e2cc6bad4675ebdb58: Status 404 returned error can't find the container with id 37668d951c5625d6a56f1af826b4e389edcd54775ad055e2cc6bad4675ebdb58 Jan 20 17:05:02 crc kubenswrapper[4995]: I0120 17:05:02.247748 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z"] Jan 20 17:05:02 crc kubenswrapper[4995]: I0120 17:05:02.930276 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" event={"ID":"f8079eec-4ec7-4979-8cd9-531c61418782","Type":"ContainerStarted","Data":"37668d951c5625d6a56f1af826b4e389edcd54775ad055e2cc6bad4675ebdb58"} Jan 20 17:05:03 crc kubenswrapper[4995]: I0120 17:05:03.941872 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" event={"ID":"f8079eec-4ec7-4979-8cd9-531c61418782","Type":"ContainerStarted","Data":"dd2cbce9300e9798e56e6466859d1b61472ce4d9c328cd2e34c2f4be141d6eae"} Jan 20 17:05:03 crc kubenswrapper[4995]: I0120 17:05:03.979501 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" podStartSLOduration=3.039962139 podStartE2EDuration="3.979468468s" podCreationTimestamp="2026-01-20 17:05:00 +0000 UTC" firstStartedPulling="2026-01-20 17:05:02.242827165 +0000 UTC m=+2020.487431971" lastFinishedPulling="2026-01-20 17:05:03.182333504 +0000 UTC m=+2021.426938300" observedRunningTime="2026-01-20 17:05:03.964211495 +0000 UTC m=+2022.208816331" watchObservedRunningTime="2026-01-20 17:05:03.979468468 +0000 UTC m=+2022.224073314" Jan 20 17:05:30 crc kubenswrapper[4995]: I0120 17:05:30.571815 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:05:30 crc kubenswrapper[4995]: I0120 17:05:30.573291 4995 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:05:30 crc kubenswrapper[4995]: I0120 17:05:30.573415 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:05:30 crc kubenswrapper[4995]: I0120 17:05:30.574613 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2dddbfd37aa100041abe11a85d1d53ea3e6d3d040244c5dbe43296741e29c68b"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:05:30 crc kubenswrapper[4995]: I0120 17:05:30.574724 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://2dddbfd37aa100041abe11a85d1d53ea3e6d3d040244c5dbe43296741e29c68b" gracePeriod=600 Jan 20 17:05:31 crc kubenswrapper[4995]: I0120 17:05:31.251022 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="2dddbfd37aa100041abe11a85d1d53ea3e6d3d040244c5dbe43296741e29c68b" exitCode=0 Jan 20 17:05:31 crc kubenswrapper[4995]: I0120 17:05:31.251108 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"2dddbfd37aa100041abe11a85d1d53ea3e6d3d040244c5dbe43296741e29c68b"} Jan 20 17:05:31 crc kubenswrapper[4995]: I0120 17:05:31.251883 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd"} Jan 20 17:05:31 crc kubenswrapper[4995]: I0120 17:05:31.251909 4995 scope.go:117] "RemoveContainer" containerID="6fb265dca1f50f2f599be2f67f3fb11d76069641daa207e905b2cc3d1d155954" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.374482 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nzn96"] Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.378792 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.392575 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzn96"] Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.538488 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/144eba19-b4f0-48d9-a1f6-fc191b87c617-catalog-content\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.538584 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9kcm\" (UniqueName: \"kubernetes.io/projected/144eba19-b4f0-48d9-a1f6-fc191b87c617-kube-api-access-b9kcm\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.538737 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/144eba19-b4f0-48d9-a1f6-fc191b87c617-utilities\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.641339 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/144eba19-b4f0-48d9-a1f6-fc191b87c617-catalog-content\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.641425 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9kcm\" (UniqueName: \"kubernetes.io/projected/144eba19-b4f0-48d9-a1f6-fc191b87c617-kube-api-access-b9kcm\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.641532 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/144eba19-b4f0-48d9-a1f6-fc191b87c617-utilities\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.642120 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/144eba19-b4f0-48d9-a1f6-fc191b87c617-utilities\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.642428 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/144eba19-b4f0-48d9-a1f6-fc191b87c617-catalog-content\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.676439 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-b9kcm\" (UniqueName: \"kubernetes.io/projected/144eba19-b4f0-48d9-a1f6-fc191b87c617-kube-api-access-b9kcm\") pod \"redhat-operators-nzn96\" (UID: \"144eba19-b4f0-48d9-a1f6-fc191b87c617\") " pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:42 crc kubenswrapper[4995]: I0120 17:05:42.711773 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:05:43 crc kubenswrapper[4995]: I0120 17:05:43.168450 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzn96"] Jan 20 17:05:43 crc kubenswrapper[4995]: I0120 17:05:43.387089 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzn96" event={"ID":"144eba19-b4f0-48d9-a1f6-fc191b87c617","Type":"ContainerStarted","Data":"89c83fafefebd469c3e9750495869616a460f13f026c3755c3af780d0bcbdcdc"} Jan 20 17:05:44 crc kubenswrapper[4995]: I0120 17:05:44.399198 4995 generic.go:334] "Generic (PLEG): container finished" podID="144eba19-b4f0-48d9-a1f6-fc191b87c617" containerID="b93b12559d7842fb744d5c4ea95bf96c59d11cbce38ca559d70e849167c92c3c" exitCode=0 Jan 20 17:05:44 crc kubenswrapper[4995]: I0120 17:05:44.399311 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzn96" event={"ID":"144eba19-b4f0-48d9-a1f6-fc191b87c617","Type":"ContainerDied","Data":"b93b12559d7842fb744d5c4ea95bf96c59d11cbce38ca559d70e849167c92c3c"} Jan 20 17:05:47 crc kubenswrapper[4995]: I0120 17:05:47.434205 4995 generic.go:334] "Generic (PLEG): container finished" podID="f8079eec-4ec7-4979-8cd9-531c61418782" containerID="dd2cbce9300e9798e56e6466859d1b61472ce4d9c328cd2e34c2f4be141d6eae" exitCode=0 Jan 20 17:05:47 crc kubenswrapper[4995]: I0120 17:05:47.434279 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" event={"ID":"f8079eec-4ec7-4979-8cd9-531c61418782","Type":"ContainerDied","Data":"dd2cbce9300e9798e56e6466859d1b61472ce4d9c328cd2e34c2f4be141d6eae"} Jan 20 17:05:48 crc kubenswrapper[4995]: I0120 17:05:48.895246 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.085465 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-repo-setup-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.085533 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.085594 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-neutron-metadata-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.085632 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-bootstrap-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.085682 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.085754 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-ovn-default-certs-0\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086411 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-inventory\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086444 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-libvirt-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086476 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ovn-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 
17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086511 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086571 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8jdr\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-kube-api-access-j8jdr\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086635 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-telemetry-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086660 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-nova-combined-ca-bundle\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.086687 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.099806 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.099889 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.101440 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.101501 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-kube-api-access-j8jdr" (OuterVolumeSpecName: "kube-api-access-j8jdr") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "kube-api-access-j8jdr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.101992 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.102804 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.102929 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.103179 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.104880 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.107156 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.107254 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.107419 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: E0120 17:05:49.140540 4995 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam podName:f8079eec-4ec7-4979-8cd9-531c61418782 nodeName:}" failed. No retries permitted until 2026-01-20 17:05:49.640511229 +0000 UTC m=+2067.885116035 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ssh-key-openstack-edpm-ipam" (UniqueName: "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782") : error deleting /var/lib/kubelet/pods/f8079eec-4ec7-4979-8cd9-531c61418782/volume-subpaths: remove /var/lib/kubelet/pods/f8079eec-4ec7-4979-8cd9-531c61418782/volume-subpaths: no such file or directory Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.145895 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-inventory" (OuterVolumeSpecName: "inventory") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191334 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191357 4995 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191367 4995 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191377 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191386 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8jdr\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-kube-api-access-j8jdr\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191395 4995 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191404 4995 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191412 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191421 4995 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191431 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191439 4995 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191449 4995 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.191457 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f8079eec-4ec7-4979-8cd9-531c61418782-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.455803 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" event={"ID":"f8079eec-4ec7-4979-8cd9-531c61418782","Type":"ContainerDied","Data":"37668d951c5625d6a56f1af826b4e389edcd54775ad055e2cc6bad4675ebdb58"} Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.455843 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37668d951c5625d6a56f1af826b4e389edcd54775ad055e2cc6bad4675ebdb58" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.455887 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.549720 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl"] Jan 20 17:05:49 crc kubenswrapper[4995]: E0120 17:05:49.550231 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8079eec-4ec7-4979-8cd9-531c61418782" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.550258 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8079eec-4ec7-4979-8cd9-531c61418782" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.550492 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8079eec-4ec7-4979-8cd9-531c61418782" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.552822 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.559264 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.560125 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl"] Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.701574 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam\") pod \"f8079eec-4ec7-4979-8cd9-531c61418782\" (UID: \"f8079eec-4ec7-4979-8cd9-531c61418782\") " Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.701950 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw66k\" (UniqueName: \"kubernetes.io/projected/2c510d64-d6b7-41c0-a293-4528282415ec-kube-api-access-jw66k\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.702016 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/2c510d64-d6b7-41c0-a293-4528282415ec-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.702061 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.702120 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.702167 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.707578 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "f8079eec-4ec7-4979-8cd9-531c61418782" (UID: "f8079eec-4ec7-4979-8cd9-531c61418782"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.803657 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.803770 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw66k\" (UniqueName: \"kubernetes.io/projected/2c510d64-d6b7-41c0-a293-4528282415ec-kube-api-access-jw66k\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.803849 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/2c510d64-d6b7-41c0-a293-4528282415ec-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.803903 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.803968 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.804039 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/f8079eec-4ec7-4979-8cd9-531c61418782-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.804842 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/2c510d64-d6b7-41c0-a293-4528282415ec-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.807504 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.807730 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.808231 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.824814 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw66k\" (UniqueName: \"kubernetes.io/projected/2c510d64-d6b7-41c0-a293-4528282415ec-kube-api-access-jw66k\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-nwtzl\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:49 crc kubenswrapper[4995]: I0120 17:05:49.869539 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:05:50 crc kubenswrapper[4995]: I0120 17:05:50.420237 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl"] Jan 20 17:05:54 crc kubenswrapper[4995]: I0120 17:05:54.533460 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" event={"ID":"2c510d64-d6b7-41c0-a293-4528282415ec","Type":"ContainerStarted","Data":"1cd2b04b60865003259e11abb96e935c3494d6be36b7de32cb829872ddefdb1c"} Jan 20 17:05:54 crc kubenswrapper[4995]: I0120 17:05:54.536897 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzn96" event={"ID":"144eba19-b4f0-48d9-a1f6-fc191b87c617","Type":"ContainerStarted","Data":"7a9eb915b93ab7449413e18f9e2081eb2b0314d1cd2486a78690803c34b78aa9"} Jan 20 17:05:56 crc kubenswrapper[4995]: I0120 17:05:56.597643 4995 generic.go:334] "Generic (PLEG): container finished" podID="144eba19-b4f0-48d9-a1f6-fc191b87c617" containerID="7a9eb915b93ab7449413e18f9e2081eb2b0314d1cd2486a78690803c34b78aa9" exitCode=0 Jan 20 17:05:56 crc kubenswrapper[4995]: I0120 17:05:56.597742 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzn96" event={"ID":"144eba19-b4f0-48d9-a1f6-fc191b87c617","Type":"ContainerDied","Data":"7a9eb915b93ab7449413e18f9e2081eb2b0314d1cd2486a78690803c34b78aa9"} Jan 20 17:06:00 crc kubenswrapper[4995]: I0120 17:06:00.641920 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" event={"ID":"2c510d64-d6b7-41c0-a293-4528282415ec","Type":"ContainerStarted","Data":"f0ddb7a1e86d1a01765d6a5084cfef92f5ea4daf615f454f314b0f276fd25a2f"} Jan 20 17:06:00 crc kubenswrapper[4995]: I0120 17:06:00.646738 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nzn96" event={"ID":"144eba19-b4f0-48d9-a1f6-fc191b87c617","Type":"ContainerStarted","Data":"683f0416d5dc240d1996a6722053cfe8622db635feb13f57ed1ffbbb346b5753"} Jan 20 17:06:00 crc kubenswrapper[4995]: I0120 17:06:00.662469 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" podStartSLOduration=6.192503822 podStartE2EDuration="11.662445886s" podCreationTimestamp="2026-01-20 17:05:49 +0000 UTC" firstStartedPulling="2026-01-20 17:05:53.910822978 +0000 UTC m=+2072.155427794" lastFinishedPulling="2026-01-20 17:05:59.380765052 +0000 UTC m=+2077.625369858" observedRunningTime="2026-01-20 17:06:00.656707901 +0000 UTC m=+2078.901312717" watchObservedRunningTime="2026-01-20 17:06:00.662445886 +0000 UTC m=+2078.907050693" Jan 20 17:06:00 crc kubenswrapper[4995]: I0120 17:06:00.688770 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nzn96" podStartSLOduration=3.412314738 podStartE2EDuration="18.688749939s" podCreationTimestamp="2026-01-20 17:05:42 +0000 UTC" firstStartedPulling="2026-01-20 17:05:44.402190306 +0000 UTC m=+2062.646795122" lastFinishedPulling="2026-01-20 17:05:59.678625527 +0000 UTC m=+2077.923230323" observedRunningTime="2026-01-20 17:06:00.677645239 +0000 UTC m=+2078.922250055" watchObservedRunningTime="2026-01-20 17:06:00.688749939 +0000 UTC m=+2078.933354745" Jan 20 17:06:02 crc kubenswrapper[4995]: I0120 17:06:02.712636 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:06:02 crc kubenswrapper[4995]: I0120 17:06:02.716433 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:06:03 crc kubenswrapper[4995]: I0120 17:06:03.772183 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nzn96" podUID="144eba19-b4f0-48d9-a1f6-fc191b87c617" containerName="registry-server" probeResult="failure" output=< Jan 20 17:06:03 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 17:06:03 crc kubenswrapper[4995]: > Jan 20 17:06:12 crc kubenswrapper[4995]: I0120 17:06:12.793748 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:06:12 crc kubenswrapper[4995]: I0120 17:06:12.875487 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nzn96" Jan 20 17:06:13 crc kubenswrapper[4995]: I0120 17:06:13.401939 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nzn96"] Jan 20 17:06:13 crc kubenswrapper[4995]: I0120 17:06:13.573819 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c5n9d"] Jan 20 17:06:13 crc kubenswrapper[4995]: I0120 17:06:13.574141 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c5n9d" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="registry-server" containerID="cri-o://5bc7dc0502c7bd3541ee95af278707323b6be2b75197ab9f51a470c282ab95ac" gracePeriod=2 Jan 20 17:06:13 crc kubenswrapper[4995]: I0120 17:06:13.805684 4995 generic.go:334] "Generic (PLEG): container finished" podID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerID="5bc7dc0502c7bd3541ee95af278707323b6be2b75197ab9f51a470c282ab95ac" exitCode=0 Jan 20 17:06:13 crc kubenswrapper[4995]: I0120 17:06:13.806585 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" 
event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerDied","Data":"5bc7dc0502c7bd3541ee95af278707323b6be2b75197ab9f51a470c282ab95ac"} Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.058280 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.222169 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw2p9\" (UniqueName: \"kubernetes.io/projected/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-kube-api-access-mw2p9\") pod \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.222269 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-catalog-content\") pod \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.222488 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-utilities\") pod \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\" (UID: \"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d\") " Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.223527 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-utilities" (OuterVolumeSpecName: "utilities") pod "3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" (UID: "3f05a60b-593a-44c2-8c7e-0e7b1d86a15d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.232760 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-kube-api-access-mw2p9" (OuterVolumeSpecName: "kube-api-access-mw2p9") pod "3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" (UID: "3f05a60b-593a-44c2-8c7e-0e7b1d86a15d"). InnerVolumeSpecName "kube-api-access-mw2p9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.324541 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.324572 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw2p9\" (UniqueName: \"kubernetes.io/projected/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-kube-api-access-mw2p9\") on node \"crc\" DevicePath \"\"" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.348746 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" (UID: "3f05a60b-593a-44c2-8c7e-0e7b1d86a15d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.425927 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.816803 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c5n9d" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.816798 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c5n9d" event={"ID":"3f05a60b-593a-44c2-8c7e-0e7b1d86a15d","Type":"ContainerDied","Data":"8afa0c7fd184ff3a545b6aa401d2955aa2c0d67e4ffe2bf6239eb902e804915e"} Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.817386 4995 scope.go:117] "RemoveContainer" containerID="5bc7dc0502c7bd3541ee95af278707323b6be2b75197ab9f51a470c282ab95ac" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.840285 4995 scope.go:117] "RemoveContainer" containerID="863092082e6276cb557d45a9aad37301d2630adc54b2312217db4b3b71410f6f" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.868631 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c5n9d"] Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.868925 4995 scope.go:117] "RemoveContainer" containerID="69f87e0ecac3b1ad4ad2bd46512c44d7f6fa8d0336b2f36939c68c21869ecd44" Jan 20 17:06:14 crc kubenswrapper[4995]: I0120 17:06:14.883500 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c5n9d"] Jan 20 17:06:16 crc kubenswrapper[4995]: I0120 17:06:16.001296 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" path="/var/lib/kubelet/pods/3f05a60b-593a-44c2-8c7e-0e7b1d86a15d/volumes" Jan 20 17:07:15 crc kubenswrapper[4995]: I0120 17:07:15.386436 4995 generic.go:334] "Generic (PLEG): container finished" podID="2c510d64-d6b7-41c0-a293-4528282415ec" containerID="f0ddb7a1e86d1a01765d6a5084cfef92f5ea4daf615f454f314b0f276fd25a2f" exitCode=0 Jan 20 17:07:15 crc kubenswrapper[4995]: I0120 17:07:15.386519 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" event={"ID":"2c510d64-d6b7-41c0-a293-4528282415ec","Type":"ContainerDied","Data":"f0ddb7a1e86d1a01765d6a5084cfef92f5ea4daf615f454f314b0f276fd25a2f"} Jan 20 17:07:16 crc kubenswrapper[4995]: I0120 17:07:16.874821 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.034039 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/2c510d64-d6b7-41c0-a293-4528282415ec-ovncontroller-config-0\") pod \"2c510d64-d6b7-41c0-a293-4528282415ec\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.034253 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-inventory\") pod \"2c510d64-d6b7-41c0-a293-4528282415ec\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.034290 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jw66k\" (UniqueName: \"kubernetes.io/projected/2c510d64-d6b7-41c0-a293-4528282415ec-kube-api-access-jw66k\") pod \"2c510d64-d6b7-41c0-a293-4528282415ec\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.034334 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ssh-key-openstack-edpm-ipam\") pod \"2c510d64-d6b7-41c0-a293-4528282415ec\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.034429 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ovn-combined-ca-bundle\") pod \"2c510d64-d6b7-41c0-a293-4528282415ec\" (UID: \"2c510d64-d6b7-41c0-a293-4528282415ec\") " Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.042355 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "2c510d64-d6b7-41c0-a293-4528282415ec" (UID: "2c510d64-d6b7-41c0-a293-4528282415ec"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.042576 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c510d64-d6b7-41c0-a293-4528282415ec-kube-api-access-jw66k" (OuterVolumeSpecName: "kube-api-access-jw66k") pod "2c510d64-d6b7-41c0-a293-4528282415ec" (UID: "2c510d64-d6b7-41c0-a293-4528282415ec"). InnerVolumeSpecName "kube-api-access-jw66k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.063494 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c510d64-d6b7-41c0-a293-4528282415ec-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "2c510d64-d6b7-41c0-a293-4528282415ec" (UID: "2c510d64-d6b7-41c0-a293-4528282415ec"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.064818 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-inventory" (OuterVolumeSpecName: "inventory") pod "2c510d64-d6b7-41c0-a293-4528282415ec" (UID: "2c510d64-d6b7-41c0-a293-4528282415ec"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.087778 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2c510d64-d6b7-41c0-a293-4528282415ec" (UID: "2c510d64-d6b7-41c0-a293-4528282415ec"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.137383 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.137423 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jw66k\" (UniqueName: \"kubernetes.io/projected/2c510d64-d6b7-41c0-a293-4528282415ec-kube-api-access-jw66k\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.137437 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.137450 4995 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c510d64-d6b7-41c0-a293-4528282415ec-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.137462 4995 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/2c510d64-d6b7-41c0-a293-4528282415ec-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.404313 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" event={"ID":"2c510d64-d6b7-41c0-a293-4528282415ec","Type":"ContainerDied","Data":"1cd2b04b60865003259e11abb96e935c3494d6be36b7de32cb829872ddefdb1c"} Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.404355 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cd2b04b60865003259e11abb96e935c3494d6be36b7de32cb829872ddefdb1c" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.404407 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-nwtzl" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.589938 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx"] Jan 20 17:07:17 crc kubenswrapper[4995]: E0120 17:07:17.590649 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="registry-server" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.590668 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="registry-server" Jan 20 17:07:17 crc kubenswrapper[4995]: E0120 17:07:17.590694 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="extract-content" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.590702 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="extract-content" Jan 20 17:07:17 crc kubenswrapper[4995]: E0120 17:07:17.590725 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="extract-utilities" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.590734 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="extract-utilities" Jan 20 17:07:17 crc kubenswrapper[4995]: E0120 17:07:17.590752 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c510d64-d6b7-41c0-a293-4528282415ec" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.590760 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c510d64-d6b7-41c0-a293-4528282415ec" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.590994 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f05a60b-593a-44c2-8c7e-0e7b1d86a15d" containerName="registry-server" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.591012 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c510d64-d6b7-41c0-a293-4528282415ec" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.591833 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.595930 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.595991 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.596181 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.596331 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.596487 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.596628 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.622519 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx"] Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.749393 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.749599 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.749704 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.749749 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.749869 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fgzj\" 
(UniqueName: \"kubernetes.io/projected/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-kube-api-access-2fgzj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.750005 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.852585 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.852711 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.852742 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.852777 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fgzj\" (UniqueName: \"kubernetes.io/projected/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-kube-api-access-2fgzj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.852820 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.852864 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.859359 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.865256 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.874471 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.877954 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.878032 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.892654 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fgzj\" (UniqueName: \"kubernetes.io/projected/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-kube-api-access-2fgzj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:17 crc kubenswrapper[4995]: I0120 17:07:17.927610 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:07:18 crc kubenswrapper[4995]: I0120 17:07:18.557705 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx"] Jan 20 17:07:19 crc kubenswrapper[4995]: I0120 17:07:19.438361 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" event={"ID":"1fd4a738-bf40-4e76-9ee2-79a8042e7c07","Type":"ContainerStarted","Data":"01f8d2c1c308b0bd1a5f5a11c71f0a757ed577c16ca187d41d971f8a481ff5be"} Jan 20 17:07:19 crc kubenswrapper[4995]: I0120 17:07:19.438771 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" event={"ID":"1fd4a738-bf40-4e76-9ee2-79a8042e7c07","Type":"ContainerStarted","Data":"c3a3037e20e256067fc427e6318cff33f16a471aba816832dd296abc769c3207"} Jan 20 17:07:19 crc kubenswrapper[4995]: I0120 17:07:19.456597 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" podStartSLOduration=1.961077477 podStartE2EDuration="2.456580414s" podCreationTimestamp="2026-01-20 17:07:17 +0000 UTC" firstStartedPulling="2026-01-20 17:07:18.56417044 +0000 UTC m=+2156.808775256" lastFinishedPulling="2026-01-20 17:07:19.059673387 +0000 UTC m=+2157.304278193" observedRunningTime="2026-01-20 17:07:19.454421686 +0000 UTC m=+2157.699026512" watchObservedRunningTime="2026-01-20 17:07:19.456580414 +0000 UTC m=+2157.701185220" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.518973 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5h7z5"] Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.521207 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.533720 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5h7z5"] Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.609263 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-utilities\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.609323 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-catalog-content\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.609403 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cgkl\" (UniqueName: \"kubernetes.io/projected/221cba2a-856a-4b6a-af14-fb024acdc689-kube-api-access-2cgkl\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.711360 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-utilities\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.711423 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-catalog-content\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.711534 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cgkl\" (UniqueName: \"kubernetes.io/projected/221cba2a-856a-4b6a-af14-fb024acdc689-kube-api-access-2cgkl\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.712367 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-utilities\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.712641 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-catalog-content\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.736645 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2cgkl\" (UniqueName: \"kubernetes.io/projected/221cba2a-856a-4b6a-af14-fb024acdc689-kube-api-access-2cgkl\") pod \"community-operators-5h7z5\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:25 crc kubenswrapper[4995]: I0120 17:07:25.850820 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:26 crc kubenswrapper[4995]: I0120 17:07:26.368285 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5h7z5"] Jan 20 17:07:26 crc kubenswrapper[4995]: I0120 17:07:26.505106 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5h7z5" event={"ID":"221cba2a-856a-4b6a-af14-fb024acdc689","Type":"ContainerStarted","Data":"e589eb598ef83d26031d50a455d7d456184209372100586a1fb92f66b25e3cf8"} Jan 20 17:07:27 crc kubenswrapper[4995]: I0120 17:07:27.513453 4995 generic.go:334] "Generic (PLEG): container finished" podID="221cba2a-856a-4b6a-af14-fb024acdc689" containerID="b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57" exitCode=0 Jan 20 17:07:27 crc kubenswrapper[4995]: I0120 17:07:27.513575 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5h7z5" event={"ID":"221cba2a-856a-4b6a-af14-fb024acdc689","Type":"ContainerDied","Data":"b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57"} Jan 20 17:07:29 crc kubenswrapper[4995]: I0120 17:07:29.539782 4995 generic.go:334] "Generic (PLEG): container finished" podID="221cba2a-856a-4b6a-af14-fb024acdc689" containerID="47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b" exitCode=0 Jan 20 17:07:29 crc kubenswrapper[4995]: I0120 17:07:29.539887 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5h7z5" event={"ID":"221cba2a-856a-4b6a-af14-fb024acdc689","Type":"ContainerDied","Data":"47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b"} Jan 20 17:07:30 crc kubenswrapper[4995]: I0120 17:07:30.551950 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5h7z5" event={"ID":"221cba2a-856a-4b6a-af14-fb024acdc689","Type":"ContainerStarted","Data":"b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b"} Jan 20 17:07:30 crc kubenswrapper[4995]: I0120 17:07:30.571544 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:07:30 crc kubenswrapper[4995]: I0120 17:07:30.571619 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:07:30 crc kubenswrapper[4995]: I0120 17:07:30.573857 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5h7z5" podStartSLOduration=3.082445344 podStartE2EDuration="5.573840145s" podCreationTimestamp="2026-01-20 17:07:25 +0000 UTC" 
firstStartedPulling="2026-01-20 17:07:27.515179123 +0000 UTC m=+2165.759783929" lastFinishedPulling="2026-01-20 17:07:30.006573884 +0000 UTC m=+2168.251178730" observedRunningTime="2026-01-20 17:07:30.570674139 +0000 UTC m=+2168.815278955" watchObservedRunningTime="2026-01-20 17:07:30.573840145 +0000 UTC m=+2168.818444951" Jan 20 17:07:35 crc kubenswrapper[4995]: I0120 17:07:35.851189 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:35 crc kubenswrapper[4995]: I0120 17:07:35.851706 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:35 crc kubenswrapper[4995]: I0120 17:07:35.909802 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:36 crc kubenswrapper[4995]: I0120 17:07:36.728481 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:36 crc kubenswrapper[4995]: I0120 17:07:36.811139 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5h7z5"] Jan 20 17:07:38 crc kubenswrapper[4995]: I0120 17:07:38.665279 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5h7z5" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="registry-server" containerID="cri-o://b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b" gracePeriod=2 Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.246205 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.305318 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-catalog-content\") pod \"221cba2a-856a-4b6a-af14-fb024acdc689\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.306432 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cgkl\" (UniqueName: \"kubernetes.io/projected/221cba2a-856a-4b6a-af14-fb024acdc689-kube-api-access-2cgkl\") pod \"221cba2a-856a-4b6a-af14-fb024acdc689\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.306610 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-utilities\") pod \"221cba2a-856a-4b6a-af14-fb024acdc689\" (UID: \"221cba2a-856a-4b6a-af14-fb024acdc689\") " Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.307730 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-utilities" (OuterVolumeSpecName: "utilities") pod "221cba2a-856a-4b6a-af14-fb024acdc689" (UID: "221cba2a-856a-4b6a-af14-fb024acdc689"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.307890 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.313292 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/221cba2a-856a-4b6a-af14-fb024acdc689-kube-api-access-2cgkl" (OuterVolumeSpecName: "kube-api-access-2cgkl") pod "221cba2a-856a-4b6a-af14-fb024acdc689" (UID: "221cba2a-856a-4b6a-af14-fb024acdc689"). InnerVolumeSpecName "kube-api-access-2cgkl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.409765 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cgkl\" (UniqueName: \"kubernetes.io/projected/221cba2a-856a-4b6a-af14-fb024acdc689-kube-api-access-2cgkl\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.607986 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "221cba2a-856a-4b6a-af14-fb024acdc689" (UID: "221cba2a-856a-4b6a-af14-fb024acdc689"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.612759 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/221cba2a-856a-4b6a-af14-fb024acdc689-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.678163 4995 generic.go:334] "Generic (PLEG): container finished" podID="221cba2a-856a-4b6a-af14-fb024acdc689" containerID="b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b" exitCode=0 Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.678192 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5h7z5" event={"ID":"221cba2a-856a-4b6a-af14-fb024acdc689","Type":"ContainerDied","Data":"b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b"} Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.678252 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5h7z5" event={"ID":"221cba2a-856a-4b6a-af14-fb024acdc689","Type":"ContainerDied","Data":"e589eb598ef83d26031d50a455d7d456184209372100586a1fb92f66b25e3cf8"} Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.678292 4995 scope.go:117] "RemoveContainer" containerID="b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.678285 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5h7z5" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.706026 4995 scope.go:117] "RemoveContainer" containerID="47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.726389 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5h7z5"] Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.730266 4995 scope.go:117] "RemoveContainer" containerID="b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.735275 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5h7z5"] Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.783179 4995 scope.go:117] "RemoveContainer" containerID="b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b" Jan 20 17:07:39 crc kubenswrapper[4995]: E0120 17:07:39.783672 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b\": container with ID starting with b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b not found: ID does not exist" containerID="b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.783725 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b"} err="failed to get container status \"b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b\": rpc error: code = NotFound desc = could not find container \"b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b\": container with ID starting with b6e326cdaf66a936ea2c66f58c26cb2e419cbb224eeb58755953483a181e087b not found: ID does not exist" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.783760 4995 scope.go:117] "RemoveContainer" containerID="47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b" Jan 20 17:07:39 crc kubenswrapper[4995]: E0120 17:07:39.784189 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b\": container with ID starting with 47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b not found: ID does not exist" containerID="47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.784336 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b"} err="failed to get container status \"47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b\": rpc error: code = NotFound desc = could not find container \"47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b\": container with ID starting with 47e615aa35fdea7a053ec701d7df143fd898eba96b60cd1259b2f01d63b22f4b not found: ID does not exist" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.784381 4995 scope.go:117] "RemoveContainer" containerID="b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57" Jan 20 17:07:39 crc kubenswrapper[4995]: E0120 17:07:39.784926 4995 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57\": container with ID starting with b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57 not found: ID does not exist" containerID="b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57" Jan 20 17:07:39 crc kubenswrapper[4995]: I0120 17:07:39.784964 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57"} err="failed to get container status \"b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57\": rpc error: code = NotFound desc = could not find container \"b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57\": container with ID starting with b8d87f6d2178de3270d2e612adc969e35f744049a641160cbdb6829f51d7cf57 not found: ID does not exist" Jan 20 17:07:40 crc kubenswrapper[4995]: I0120 17:07:40.005438 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" path="/var/lib/kubelet/pods/221cba2a-856a-4b6a-af14-fb024acdc689/volumes" Jan 20 17:08:00 crc kubenswrapper[4995]: I0120 17:08:00.571501 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:08:00 crc kubenswrapper[4995]: I0120 17:08:00.572153 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:08:15 crc kubenswrapper[4995]: I0120 17:08:15.044184 4995 generic.go:334] "Generic (PLEG): container finished" podID="1fd4a738-bf40-4e76-9ee2-79a8042e7c07" containerID="01f8d2c1c308b0bd1a5f5a11c71f0a757ed577c16ca187d41d971f8a481ff5be" exitCode=0 Jan 20 17:08:15 crc kubenswrapper[4995]: I0120 17:08:15.044272 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" event={"ID":"1fd4a738-bf40-4e76-9ee2-79a8042e7c07","Type":"ContainerDied","Data":"01f8d2c1c308b0bd1a5f5a11c71f0a757ed577c16ca187d41d971f8a481ff5be"} Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.475628 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.627866 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-inventory\") pod \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.627922 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-nova-metadata-neutron-config-0\") pod \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.627995 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fgzj\" (UniqueName: \"kubernetes.io/projected/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-kube-api-access-2fgzj\") pod \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.628066 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-ovn-metadata-agent-neutron-config-0\") pod \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.628191 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-metadata-combined-ca-bundle\") pod \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.628257 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-ssh-key-openstack-edpm-ipam\") pod \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\" (UID: \"1fd4a738-bf40-4e76-9ee2-79a8042e7c07\") " Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.635787 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-kube-api-access-2fgzj" (OuterVolumeSpecName: "kube-api-access-2fgzj") pod "1fd4a738-bf40-4e76-9ee2-79a8042e7c07" (UID: "1fd4a738-bf40-4e76-9ee2-79a8042e7c07"). InnerVolumeSpecName "kube-api-access-2fgzj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.638975 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "1fd4a738-bf40-4e76-9ee2-79a8042e7c07" (UID: "1fd4a738-bf40-4e76-9ee2-79a8042e7c07"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.659050 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "1fd4a738-bf40-4e76-9ee2-79a8042e7c07" (UID: "1fd4a738-bf40-4e76-9ee2-79a8042e7c07"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.659626 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1fd4a738-bf40-4e76-9ee2-79a8042e7c07" (UID: "1fd4a738-bf40-4e76-9ee2-79a8042e7c07"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.668055 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "1fd4a738-bf40-4e76-9ee2-79a8042e7c07" (UID: "1fd4a738-bf40-4e76-9ee2-79a8042e7c07"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.670433 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-inventory" (OuterVolumeSpecName: "inventory") pod "1fd4a738-bf40-4e76-9ee2-79a8042e7c07" (UID: "1fd4a738-bf40-4e76-9ee2-79a8042e7c07"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.731196 4995 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.731239 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fgzj\" (UniqueName: \"kubernetes.io/projected/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-kube-api-access-2fgzj\") on node \"crc\" DevicePath \"\"" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.731258 4995 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.731271 4995 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.731285 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:08:16 crc kubenswrapper[4995]: I0120 17:08:16.731297 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1fd4a738-bf40-4e76-9ee2-79a8042e7c07-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.067141 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" event={"ID":"1fd4a738-bf40-4e76-9ee2-79a8042e7c07","Type":"ContainerDied","Data":"c3a3037e20e256067fc427e6318cff33f16a471aba816832dd296abc769c3207"} Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.067184 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3a3037e20e256067fc427e6318cff33f16a471aba816832dd296abc769c3207" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.067239 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.153626 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz"] Jan 20 17:08:17 crc kubenswrapper[4995]: E0120 17:08:17.154216 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fd4a738-bf40-4e76-9ee2-79a8042e7c07" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.154245 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fd4a738-bf40-4e76-9ee2-79a8042e7c07" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 20 17:08:17 crc kubenswrapper[4995]: E0120 17:08:17.154273 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="registry-server" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.154285 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="registry-server" Jan 20 17:08:17 crc kubenswrapper[4995]: E0120 17:08:17.154348 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="extract-utilities" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.154362 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="extract-utilities" Jan 20 17:08:17 crc kubenswrapper[4995]: E0120 17:08:17.154386 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="extract-content" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.154427 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="extract-content" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.154771 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="221cba2a-856a-4b6a-af14-fb024acdc689" containerName="registry-server" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.154827 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fd4a738-bf40-4e76-9ee2-79a8042e7c07" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.155871 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.157878 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.159299 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.159422 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.159592 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.159924 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.183635 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz"] Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.341563 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.341609 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.341684 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.341718 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.341741 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nkgj\" (UniqueName: \"kubernetes.io/projected/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-kube-api-access-8nkgj\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.443378 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.443447 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.443523 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.443578 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.443608 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nkgj\" (UniqueName: \"kubernetes.io/projected/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-kube-api-access-8nkgj\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.447126 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.447544 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.449544 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.453324 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.461509 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nkgj\" (UniqueName: \"kubernetes.io/projected/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-kube-api-access-8nkgj\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:17 crc kubenswrapper[4995]: I0120 17:08:17.487867 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:08:18 crc kubenswrapper[4995]: I0120 17:08:18.026348 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz"] Jan 20 17:08:18 crc kubenswrapper[4995]: W0120 17:08:18.031518 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34c57bed_2d89_4ce3_9613_eb3ec4fb222b.slice/crio-720ae117fbfe0a123b05d57c34eb794b2492ccba28b0fdbdb8d86dc1317034cf WatchSource:0}: Error finding container 720ae117fbfe0a123b05d57c34eb794b2492ccba28b0fdbdb8d86dc1317034cf: Status 404 returned error can't find the container with id 720ae117fbfe0a123b05d57c34eb794b2492ccba28b0fdbdb8d86dc1317034cf Jan 20 17:08:18 crc kubenswrapper[4995]: I0120 17:08:18.078200 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" event={"ID":"34c57bed-2d89-4ce3-9613-eb3ec4fb222b","Type":"ContainerStarted","Data":"720ae117fbfe0a123b05d57c34eb794b2492ccba28b0fdbdb8d86dc1317034cf"} Jan 20 17:08:19 crc kubenswrapper[4995]: I0120 17:08:19.090103 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" event={"ID":"34c57bed-2d89-4ce3-9613-eb3ec4fb222b","Type":"ContainerStarted","Data":"785f2a3bfba4b39297931f28231602e4d087cdf54ae6363cf20e50d1951d9004"} Jan 20 17:08:19 crc kubenswrapper[4995]: I0120 17:08:19.114890 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" podStartSLOduration=1.553056032 podStartE2EDuration="2.114866715s" podCreationTimestamp="2026-01-20 17:08:17 +0000 UTC" firstStartedPulling="2026-01-20 17:08:18.034005918 +0000 UTC m=+2216.278610714" lastFinishedPulling="2026-01-20 17:08:18.595816581 +0000 UTC m=+2216.840421397" observedRunningTime="2026-01-20 17:08:19.109366316 +0000 UTC m=+2217.353971122" watchObservedRunningTime="2026-01-20 17:08:19.114866715 +0000 UTC m=+2217.359471521" Jan 20 17:08:30 crc kubenswrapper[4995]: I0120 17:08:30.571458 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:08:30 crc kubenswrapper[4995]: I0120 17:08:30.572277 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:08:30 crc kubenswrapper[4995]: I0120 17:08:30.572356 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:08:30 crc kubenswrapper[4995]: I0120 17:08:30.573453 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:08:30 crc kubenswrapper[4995]: I0120 17:08:30.573547 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" gracePeriod=600 Jan 20 17:08:30 crc kubenswrapper[4995]: E0120 17:08:30.692576 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:08:31 crc kubenswrapper[4995]: I0120 17:08:31.210330 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" exitCode=0 Jan 20 17:08:31 crc kubenswrapper[4995]: I0120 17:08:31.210398 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd"} Jan 20 17:08:31 crc kubenswrapper[4995]: I0120 17:08:31.211397 4995 scope.go:117] "RemoveContainer" containerID="2dddbfd37aa100041abe11a85d1d53ea3e6d3d040244c5dbe43296741e29c68b" Jan 20 17:08:31 crc kubenswrapper[4995]: I0120 17:08:31.212630 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:08:31 crc kubenswrapper[4995]: E0120 17:08:31.213369 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:08:45 crc kubenswrapper[4995]: I0120 17:08:45.989638 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:08:45 crc kubenswrapper[4995]: E0120 17:08:45.991289 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:08:57 crc kubenswrapper[4995]: I0120 17:08:57.990214 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:08:57 crc kubenswrapper[4995]: E0120 17:08:57.991236 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:09:10 crc kubenswrapper[4995]: I0120 17:09:10.990037 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:09:10 crc kubenswrapper[4995]: E0120 17:09:10.992042 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:09:22 crc kubenswrapper[4995]: I0120 17:09:22.004672 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:09:22 crc kubenswrapper[4995]: E0120 17:09:22.006064 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:09:35 crc kubenswrapper[4995]: I0120 17:09:35.989773 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:09:35 crc kubenswrapper[4995]: E0120 17:09:35.991025 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:09:48 crc kubenswrapper[4995]: I0120 17:09:48.990852 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:09:48 crc kubenswrapper[4995]: E0120 17:09:48.992611 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:10:03 crc kubenswrapper[4995]: I0120 17:10:03.989743 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:10:03 crc kubenswrapper[4995]: E0120 17:10:03.990585 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:10:15 crc kubenswrapper[4995]: I0120 17:10:15.990208 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:10:15 crc kubenswrapper[4995]: E0120 17:10:15.991229 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:10:29 crc kubenswrapper[4995]: I0120 17:10:29.990140 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:10:29 crc kubenswrapper[4995]: E0120 17:10:29.991042 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:10:43 crc kubenswrapper[4995]: I0120 17:10:43.990544 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:10:43 crc kubenswrapper[4995]: E0120 17:10:43.991957 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:10:56 crc kubenswrapper[4995]: I0120 17:10:55.999643 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:10:56 crc kubenswrapper[4995]: E0120 17:10:56.001224 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:11:06 crc kubenswrapper[4995]: I0120 17:11:06.990107 4995 
scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:11:06 crc kubenswrapper[4995]: E0120 17:11:06.992132 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:11:18 crc kubenswrapper[4995]: I0120 17:11:18.990315 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:11:18 crc kubenswrapper[4995]: E0120 17:11:18.991210 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:11:31 crc kubenswrapper[4995]: I0120 17:11:31.995235 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:11:31 crc kubenswrapper[4995]: E0120 17:11:31.996059 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:11:44 crc kubenswrapper[4995]: I0120 17:11:44.989863 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:11:44 crc kubenswrapper[4995]: E0120 17:11:44.990704 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:11:55 crc kubenswrapper[4995]: I0120 17:11:55.989246 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:11:55 crc kubenswrapper[4995]: E0120 17:11:55.990116 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:12:09 crc kubenswrapper[4995]: I0120 17:12:09.990114 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:12:09 crc kubenswrapper[4995]: E0120 17:12:09.991157 4995 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:12:21 crc kubenswrapper[4995]: I0120 17:12:21.996298 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:12:21 crc kubenswrapper[4995]: E0120 17:12:21.997224 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:12:35 crc kubenswrapper[4995]: I0120 17:12:35.989750 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:12:35 crc kubenswrapper[4995]: E0120 17:12:35.990767 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:12:46 crc kubenswrapper[4995]: I0120 17:12:46.989466 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:12:46 crc kubenswrapper[4995]: E0120 17:12:46.990976 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:13:00 crc kubenswrapper[4995]: I0120 17:13:00.990646 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:13:00 crc kubenswrapper[4995]: E0120 17:13:00.991263 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:13:04 crc kubenswrapper[4995]: I0120 17:13:04.029902 4995 generic.go:334] "Generic (PLEG): container finished" podID="34c57bed-2d89-4ce3-9613-eb3ec4fb222b" containerID="785f2a3bfba4b39297931f28231602e4d087cdf54ae6363cf20e50d1951d9004" exitCode=0 Jan 20 17:13:04 crc kubenswrapper[4995]: I0120 17:13:04.030043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" 
event={"ID":"34c57bed-2d89-4ce3-9613-eb3ec4fb222b","Type":"ContainerDied","Data":"785f2a3bfba4b39297931f28231602e4d087cdf54ae6363cf20e50d1951d9004"} Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.467268 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.631849 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-secret-0\") pod \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.632011 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-inventory\") pod \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.632250 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nkgj\" (UniqueName: \"kubernetes.io/projected/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-kube-api-access-8nkgj\") pod \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.632338 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-ssh-key-openstack-edpm-ipam\") pod \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.632411 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-combined-ca-bundle\") pod \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\" (UID: \"34c57bed-2d89-4ce3-9613-eb3ec4fb222b\") " Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.640487 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "34c57bed-2d89-4ce3-9613-eb3ec4fb222b" (UID: "34c57bed-2d89-4ce3-9613-eb3ec4fb222b"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.643452 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-kube-api-access-8nkgj" (OuterVolumeSpecName: "kube-api-access-8nkgj") pod "34c57bed-2d89-4ce3-9613-eb3ec4fb222b" (UID: "34c57bed-2d89-4ce3-9613-eb3ec4fb222b"). InnerVolumeSpecName "kube-api-access-8nkgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.665790 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "34c57bed-2d89-4ce3-9613-eb3ec4fb222b" (UID: "34c57bed-2d89-4ce3-9613-eb3ec4fb222b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.668609 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "34c57bed-2d89-4ce3-9613-eb3ec4fb222b" (UID: "34c57bed-2d89-4ce3-9613-eb3ec4fb222b"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.684775 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-inventory" (OuterVolumeSpecName: "inventory") pod "34c57bed-2d89-4ce3-9613-eb3ec4fb222b" (UID: "34c57bed-2d89-4ce3-9613-eb3ec4fb222b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.736517 4995 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.736561 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.736580 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nkgj\" (UniqueName: \"kubernetes.io/projected/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-kube-api-access-8nkgj\") on node \"crc\" DevicePath \"\"" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.736600 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:13:05 crc kubenswrapper[4995]: I0120 17:13:05.736617 4995 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34c57bed-2d89-4ce3-9613-eb3ec4fb222b-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.050732 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" event={"ID":"34c57bed-2d89-4ce3-9613-eb3ec4fb222b","Type":"ContainerDied","Data":"720ae117fbfe0a123b05d57c34eb794b2492ccba28b0fdbdb8d86dc1317034cf"} Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.050768 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="720ae117fbfe0a123b05d57c34eb794b2492ccba28b0fdbdb8d86dc1317034cf" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.050799 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.151596 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc"] Jan 20 17:13:06 crc kubenswrapper[4995]: E0120 17:13:06.152045 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34c57bed-2d89-4ce3-9613-eb3ec4fb222b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.152090 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="34c57bed-2d89-4ce3-9613-eb3ec4fb222b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.152337 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="34c57bed-2d89-4ce3-9613-eb3ec4fb222b" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.153274 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.156799 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.157060 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.157276 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.157463 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.157659 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.157884 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.164071 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.166534 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc"] Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.349345 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350162 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350223 4995 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350319 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350417 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvd4m\" (UniqueName: \"kubernetes.io/projected/a463f304-1432-497f-9f19-3cd3b4d05da2-kube-api-access-wvd4m\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350489 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350545 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350585 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.350681 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.452656 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.452719 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvd4m\" (UniqueName: \"kubernetes.io/projected/a463f304-1432-497f-9f19-3cd3b4d05da2-kube-api-access-wvd4m\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.452784 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.453202 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.453231 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.454270 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.454148 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.454862 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.455057 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.455143 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.458135 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.458741 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.459031 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.460029 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.461273 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.462026 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.463416 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 
17:13:06.474416 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvd4m\" (UniqueName: \"kubernetes.io/projected/a463f304-1432-497f-9f19-3cd3b4d05da2-kube-api-access-wvd4m\") pod \"nova-edpm-deployment-openstack-edpm-ipam-f6vqc\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc"
Jan 20 17:13:06 crc kubenswrapper[4995]: I0120 17:13:06.476931 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc"
Jan 20 17:13:07 crc kubenswrapper[4995]: I0120 17:13:07.091753 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc"]
Jan 20 17:13:07 crc kubenswrapper[4995]: I0120 17:13:07.096847 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 20 17:13:08 crc kubenswrapper[4995]: I0120 17:13:08.073741 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" event={"ID":"a463f304-1432-497f-9f19-3cd3b4d05da2","Type":"ContainerStarted","Data":"656ada2ebd93d1994b7e8a487d0ff7fe8cd3f954a65f085bf653fafb7ad2c363"}
Jan 20 17:13:08 crc kubenswrapper[4995]: I0120 17:13:08.074514 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" event={"ID":"a463f304-1432-497f-9f19-3cd3b4d05da2","Type":"ContainerStarted","Data":"9c59f8994a34399c2d75bd7c9521c4360e2295daf185ab28acfaafe78a1d9196"}
Jan 20 17:13:08 crc kubenswrapper[4995]: I0120 17:13:08.103787 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" podStartSLOduration=1.6656395449999999 podStartE2EDuration="2.103767388s" podCreationTimestamp="2026-01-20 17:13:06 +0000 UTC" firstStartedPulling="2026-01-20 17:13:07.096466272 +0000 UTC m=+2505.341071078" lastFinishedPulling="2026-01-20 17:13:07.534594105 +0000 UTC m=+2505.779198921" observedRunningTime="2026-01-20 17:13:08.097904948 +0000 UTC m=+2506.342509784" watchObservedRunningTime="2026-01-20 17:13:08.103767388 +0000 UTC m=+2506.348372204"
Jan 20 17:13:14 crc kubenswrapper[4995]: I0120 17:13:14.989350 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd"
Jan 20 17:13:14 crc kubenswrapper[4995]: E0120 17:13:14.989940 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:13:26 crc kubenswrapper[4995]: I0120 17:13:26.989872 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd"
Jan 20 17:13:26 crc kubenswrapper[4995]: E0120 17:13:26.991150 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
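
In the "Observed pod startup duration" record above, podStartSLOduration is, up to small clock skew, the end-to-end duration minus the image-pull window (firstStartedPulling to lastFinishedPulling). A quick check with the numbers copied from the nova-edpm record; the tiny residual is expected, since the tracker mixes wall-clock and monotonic readings:

from datetime import datetime

FMT = "%Y-%m-%d %H:%M:%S.%f"
# Values copied from the record above, truncated to microseconds for %f.
first_started = datetime.strptime("2026-01-20 17:13:07.096466", FMT)
last_finished = datetime.strptime("2026-01-20 17:13:07.534594", FMT)

pull_window = (last_finished - first_started).total_seconds()
e2e = 2.103767388  # podStartE2EDuration from the record above
print(e2e - pull_window)  # ~1.665639, matching podStartSLOduration=1.66563954...
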
podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:13:40 crc kubenswrapper[4995]: I0120 17:13:40.990583 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:13:41 crc kubenswrapper[4995]: I0120 17:13:41.457716 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"b1a719c573a74fe67fc6e576c604a61c3db39096bff0ed702a9e56ffca19d5c9"} Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.043933 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lxstp"] Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.046601 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.056645 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lxstp"] Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.114959 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-utilities\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.116015 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmg44\" (UniqueName: \"kubernetes.io/projected/c5d36177-2cf0-4041-b811-5a4197a18876-kube-api-access-fmg44\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.116217 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-catalog-content\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.217992 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-catalog-content\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.218229 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-utilities\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.218253 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmg44\" (UniqueName: \"kubernetes.io/projected/c5d36177-2cf0-4041-b811-5a4197a18876-kube-api-access-fmg44\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:30 
Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.218887 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-catalog-content\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp"
Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.219755 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-utilities\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp"
Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.245156 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmg44\" (UniqueName: \"kubernetes.io/projected/c5d36177-2cf0-4041-b811-5a4197a18876-kube-api-access-fmg44\") pod \"certified-operators-lxstp\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " pod="openshift-marketplace/certified-operators-lxstp"
Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.385927 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lxstp"
Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.927704 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lxstp"]
Jan 20 17:14:30 crc kubenswrapper[4995]: W0120 17:14:30.930104 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5d36177_2cf0_4041_b811_5a4197a18876.slice/crio-6ac66ae8d1868d613db9e485371c71254e8a7b1730137d55c08b15fa45f7b67e WatchSource:0}: Error finding container 6ac66ae8d1868d613db9e485371c71254e8a7b1730137d55c08b15fa45f7b67e: Status 404 returned error can't find the container with id 6ac66ae8d1868d613db9e485371c71254e8a7b1730137d55c08b15fa45f7b67e
Jan 20 17:14:30 crc kubenswrapper[4995]: I0120 17:14:30.950400 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerStarted","Data":"6ac66ae8d1868d613db9e485371c71254e8a7b1730137d55c08b15fa45f7b67e"}
Jan 20 17:14:31 crc kubenswrapper[4995]: I0120 17:14:31.972055 4995 generic.go:334] "Generic (PLEG): container finished" podID="c5d36177-2cf0-4041-b811-5a4197a18876" containerID="f6cd6e5d33aa5567776bda0ae25b74b20aeb886dee3f6f68d35d51b60c54acab" exitCode=0
Jan 20 17:14:31 crc kubenswrapper[4995]: I0120 17:14:31.973677 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerDied","Data":"f6cd6e5d33aa5567776bda0ae25b74b20aeb886dee3f6f68d35d51b60c54acab"}
Jan 20 17:14:32 crc kubenswrapper[4995]: I0120 17:14:32.984111 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerStarted","Data":"564ea4f265ea87afd5de4149c6049af857d8204f447edc31a1f3b41d6b338c36"}
Jan 20 17:14:33 crc kubenswrapper[4995]: I0120 17:14:33.995684 4995 generic.go:334] "Generic (PLEG): container finished" podID="c5d36177-2cf0-4041-b811-5a4197a18876" containerID="564ea4f265ea87afd5de4149c6049af857d8204f447edc31a1f3b41d6b338c36" exitCode=0
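
The PLEG events above trace the usual catalog-pod sequence: the sandbox starts, then each short-lived step (the extract-utilities and extract-content containers named in the earlier RemoveStaleState records) starts and dies with exitCode=0 before the long-running registry-server starts. A small sketch that folds the event={...} JSON fragments into per-container event lists; the fold itself is this sketch's own, only the event shape comes from these records:

import json, re

EVENT = re.compile(r'event=(\{.*?\})(?: |$)')  # the flat JSON fragment in PLEG records

def lifecycles(lines):
    """Map container ID -> ordered PLEG event types."""
    state = {}
    for line in lines:
        m = EVENT.search(line)
        if m:
            ev = json.loads(m.group(1))
            state.setdefault(ev["Data"], []).append(ev["Type"])
    return state

sample = [
    'event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerStarted","Data":"564ea4f265ea87afd5de4149c6049af857d8204f447edc31a1f3b41d6b338c36"}',
    'event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerDied","Data":"564ea4f265ea87afd5de4149c6049af857d8204f447edc31a1f3b41d6b338c36"}',
]
print(lifecycles(sample))  # one Started/Died pair for one of the extract steps
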
Jan 20 17:14:34 crc kubenswrapper[4995]: I0120 17:14:34.003601 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerDied","Data":"564ea4f265ea87afd5de4149c6049af857d8204f447edc31a1f3b41d6b338c36"}
Jan 20 17:14:35 crc kubenswrapper[4995]: I0120 17:14:35.010213 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerStarted","Data":"a432e20fbbd2111417399c3d212cda73680f222397339e6db0281ee788d878ec"}
Jan 20 17:14:35 crc kubenswrapper[4995]: I0120 17:14:35.035267 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lxstp" podStartSLOduration=2.553855799 podStartE2EDuration="5.035246869s" podCreationTimestamp="2026-01-20 17:14:30 +0000 UTC" firstStartedPulling="2026-01-20 17:14:31.975708584 +0000 UTC m=+2590.220313400" lastFinishedPulling="2026-01-20 17:14:34.457099634 +0000 UTC m=+2592.701704470" observedRunningTime="2026-01-20 17:14:35.034707474 +0000 UTC m=+2593.279312320" watchObservedRunningTime="2026-01-20 17:14:35.035246869 +0000 UTC m=+2593.279851675"
Jan 20 17:14:39 crc kubenswrapper[4995]: I0120 17:14:39.923628 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-btx7c"]
Jan 20 17:14:39 crc kubenswrapper[4995]: I0120 17:14:39.927341 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-btx7c"
Jan 20 17:14:39 crc kubenswrapper[4995]: I0120 17:14:39.944361 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-btx7c"]
Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.026464 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-catalog-content\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c"
Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.026515 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-utilities\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c"
Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.026585 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7rxz\" (UniqueName: \"kubernetes.io/projected/3959470d-2132-4b10-8158-7264d7327392-kube-api-access-m7rxz\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c"
Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.128688 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-catalog-content\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c"
Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.128735 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-utilities\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.128794 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7rxz\" (UniqueName: \"kubernetes.io/projected/3959470d-2132-4b10-8158-7264d7327392-kube-api-access-m7rxz\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.129387 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-utilities\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.129629 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-catalog-content\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.153213 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7rxz\" (UniqueName: \"kubernetes.io/projected/3959470d-2132-4b10-8158-7264d7327392-kube-api-access-m7rxz\") pod \"redhat-marketplace-btx7c\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.262722 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.387571 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.387883 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.472490 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:40 crc kubenswrapper[4995]: I0120 17:14:40.852100 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-btx7c"] Jan 20 17:14:41 crc kubenswrapper[4995]: I0120 17:14:41.084272 4995 generic.go:334] "Generic (PLEG): container finished" podID="3959470d-2132-4b10-8158-7264d7327392" containerID="8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede" exitCode=0 Jan 20 17:14:41 crc kubenswrapper[4995]: I0120 17:14:41.084363 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerDied","Data":"8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede"} Jan 20 17:14:41 crc kubenswrapper[4995]: I0120 17:14:41.084816 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerStarted","Data":"aadb0adaaa6932246e98784a23b1c286c2d0e3872a2baeffc1ab9e72f5b4c5c3"} Jan 20 17:14:41 crc kubenswrapper[4995]: I0120 17:14:41.127288 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:42 crc kubenswrapper[4995]: I0120 17:14:42.883069 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lxstp"] Jan 20 17:14:43 crc kubenswrapper[4995]: I0120 17:14:43.110874 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerStarted","Data":"a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0"} Jan 20 17:14:43 crc kubenswrapper[4995]: I0120 17:14:43.110967 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lxstp" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="registry-server" containerID="cri-o://a432e20fbbd2111417399c3d212cda73680f222397339e6db0281ee788d878ec" gracePeriod=2 Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.142048 4995 generic.go:334] "Generic (PLEG): container finished" podID="3959470d-2132-4b10-8158-7264d7327392" containerID="a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0" exitCode=0 Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.142374 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerDied","Data":"a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0"} Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.145854 4995 generic.go:334] "Generic (PLEG): container finished" podID="c5d36177-2cf0-4041-b811-5a4197a18876" 
containerID="a432e20fbbd2111417399c3d212cda73680f222397339e6db0281ee788d878ec" exitCode=0 Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.145896 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerDied","Data":"a432e20fbbd2111417399c3d212cda73680f222397339e6db0281ee788d878ec"} Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.663659 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.739215 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-catalog-content\") pod \"c5d36177-2cf0-4041-b811-5a4197a18876\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.739363 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-utilities\") pod \"c5d36177-2cf0-4041-b811-5a4197a18876\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.739391 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmg44\" (UniqueName: \"kubernetes.io/projected/c5d36177-2cf0-4041-b811-5a4197a18876-kube-api-access-fmg44\") pod \"c5d36177-2cf0-4041-b811-5a4197a18876\" (UID: \"c5d36177-2cf0-4041-b811-5a4197a18876\") " Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.740093 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-utilities" (OuterVolumeSpecName: "utilities") pod "c5d36177-2cf0-4041-b811-5a4197a18876" (UID: "c5d36177-2cf0-4041-b811-5a4197a18876"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.749409 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5d36177-2cf0-4041-b811-5a4197a18876-kube-api-access-fmg44" (OuterVolumeSpecName: "kube-api-access-fmg44") pod "c5d36177-2cf0-4041-b811-5a4197a18876" (UID: "c5d36177-2cf0-4041-b811-5a4197a18876"). InnerVolumeSpecName "kube-api-access-fmg44". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.786514 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c5d36177-2cf0-4041-b811-5a4197a18876" (UID: "c5d36177-2cf0-4041-b811-5a4197a18876"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.842862 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.842901 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5d36177-2cf0-4041-b811-5a4197a18876-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:14:44 crc kubenswrapper[4995]: I0120 17:14:44.842913 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmg44\" (UniqueName: \"kubernetes.io/projected/c5d36177-2cf0-4041-b811-5a4197a18876-kube-api-access-fmg44\") on node \"crc\" DevicePath \"\"" Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.155505 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerStarted","Data":"cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45"} Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.158925 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxstp" event={"ID":"c5d36177-2cf0-4041-b811-5a4197a18876","Type":"ContainerDied","Data":"6ac66ae8d1868d613db9e485371c71254e8a7b1730137d55c08b15fa45f7b67e"} Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.159102 4995 scope.go:117] "RemoveContainer" containerID="a432e20fbbd2111417399c3d212cda73680f222397339e6db0281ee788d878ec" Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.159282 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lxstp" Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.185416 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-btx7c" podStartSLOduration=2.403083065 podStartE2EDuration="6.185398531s" podCreationTimestamp="2026-01-20 17:14:39 +0000 UTC" firstStartedPulling="2026-01-20 17:14:41.086055151 +0000 UTC m=+2599.330659957" lastFinishedPulling="2026-01-20 17:14:44.868370617 +0000 UTC m=+2603.112975423" observedRunningTime="2026-01-20 17:14:45.178642409 +0000 UTC m=+2603.423247215" watchObservedRunningTime="2026-01-20 17:14:45.185398531 +0000 UTC m=+2603.430003337" Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.193604 4995 scope.go:117] "RemoveContainer" containerID="564ea4f265ea87afd5de4149c6049af857d8204f447edc31a1f3b41d6b338c36" Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.199338 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lxstp"] Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.213654 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lxstp"] Jan 20 17:14:45 crc kubenswrapper[4995]: I0120 17:14:45.219931 4995 scope.go:117] "RemoveContainer" containerID="f6cd6e5d33aa5567776bda0ae25b74b20aeb886dee3f6f68d35d51b60c54acab" Jan 20 17:14:46 crc kubenswrapper[4995]: I0120 17:14:46.004909 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" path="/var/lib/kubelet/pods/c5d36177-2cf0-4041-b811-5a4197a18876/volumes" Jan 20 17:14:50 crc kubenswrapper[4995]: I0120 17:14:50.263221 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:50 crc kubenswrapper[4995]: I0120 17:14:50.263884 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:50 crc kubenswrapper[4995]: I0120 17:14:50.338277 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:51 crc kubenswrapper[4995]: I0120 17:14:51.262508 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:51 crc kubenswrapper[4995]: I0120 17:14:51.310090 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-btx7c"] Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.230633 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-btx7c" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="registry-server" containerID="cri-o://cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45" gracePeriod=2 Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.685215 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.831872 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-catalog-content\") pod \"3959470d-2132-4b10-8158-7264d7327392\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.831916 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-utilities\") pod \"3959470d-2132-4b10-8158-7264d7327392\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.832024 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7rxz\" (UniqueName: \"kubernetes.io/projected/3959470d-2132-4b10-8158-7264d7327392-kube-api-access-m7rxz\") pod \"3959470d-2132-4b10-8158-7264d7327392\" (UID: \"3959470d-2132-4b10-8158-7264d7327392\") " Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.833431 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-utilities" (OuterVolumeSpecName: "utilities") pod "3959470d-2132-4b10-8158-7264d7327392" (UID: "3959470d-2132-4b10-8158-7264d7327392"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.840557 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3959470d-2132-4b10-8158-7264d7327392-kube-api-access-m7rxz" (OuterVolumeSpecName: "kube-api-access-m7rxz") pod "3959470d-2132-4b10-8158-7264d7327392" (UID: "3959470d-2132-4b10-8158-7264d7327392"). InnerVolumeSpecName "kube-api-access-m7rxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.859892 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3959470d-2132-4b10-8158-7264d7327392" (UID: "3959470d-2132-4b10-8158-7264d7327392"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.934220 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.934265 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3959470d-2132-4b10-8158-7264d7327392-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:14:53 crc kubenswrapper[4995]: I0120 17:14:53.934283 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7rxz\" (UniqueName: \"kubernetes.io/projected/3959470d-2132-4b10-8158-7264d7327392-kube-api-access-m7rxz\") on node \"crc\" DevicePath \"\"" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.242994 4995 generic.go:334] "Generic (PLEG): container finished" podID="3959470d-2132-4b10-8158-7264d7327392" containerID="cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45" exitCode=0 Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.243062 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-btx7c" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.243088 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerDied","Data":"cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45"} Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.243488 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-btx7c" event={"ID":"3959470d-2132-4b10-8158-7264d7327392","Type":"ContainerDied","Data":"aadb0adaaa6932246e98784a23b1c286c2d0e3872a2baeffc1ab9e72f5b4c5c3"} Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.243511 4995 scope.go:117] "RemoveContainer" containerID="cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.274720 4995 scope.go:117] "RemoveContainer" containerID="a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.275716 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-btx7c"] Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.285539 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-btx7c"] Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.299634 4995 scope.go:117] "RemoveContainer" containerID="8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.362137 4995 scope.go:117] "RemoveContainer" containerID="cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45" Jan 20 17:14:54 crc kubenswrapper[4995]: E0120 17:14:54.362677 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45\": container with ID starting with cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45 not found: ID does not exist" containerID="cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.362712 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45"} err="failed to get container status \"cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45\": rpc error: code = NotFound desc = could not find container \"cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45\": container with ID starting with cb6284b283c9e53fa60b0a4dcd4743a208de20f3d221f7f834b884354c52be45 not found: ID does not exist" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.362731 4995 scope.go:117] "RemoveContainer" containerID="a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0" Jan 20 17:14:54 crc kubenswrapper[4995]: E0120 17:14:54.363109 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0\": container with ID starting with a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0 not found: ID does not exist" containerID="a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.363130 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0"} err="failed to get container status \"a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0\": rpc error: code = NotFound desc = could not find container \"a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0\": container with ID starting with a049397a84d03d1abbe7d84b1026b50db0fd99040746dd35c4cd7936873dabc0 not found: ID does not exist" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.363144 4995 scope.go:117] "RemoveContainer" containerID="8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede" Jan 20 17:14:54 crc kubenswrapper[4995]: E0120 17:14:54.363409 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede\": container with ID starting with 8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede not found: ID does not exist" containerID="8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede" Jan 20 17:14:54 crc kubenswrapper[4995]: I0120 17:14:54.363457 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede"} err="failed to get container status \"8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede\": rpc error: code = NotFound desc = could not find container \"8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede\": container with ID starting with 8f115ce5ec78c626e9f5039b21fc859c734b96af02bb3b99c0bf9c1d0bcdaede not found: ID does not exist" Jan 20 17:14:56 crc kubenswrapper[4995]: I0120 17:14:56.024811 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3959470d-2132-4b10-8158-7264d7327392" path="/var/lib/kubelet/pods/3959470d-2132-4b10-8158-7264d7327392/volumes" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.156169 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv"] Jan 20 17:15:00 crc kubenswrapper[4995]: E0120 17:15:00.157020 4995 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="extract-content" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157036 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="extract-content" Jan 20 17:15:00 crc kubenswrapper[4995]: E0120 17:15:00.157061 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="extract-content" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157069 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="extract-content" Jan 20 17:15:00 crc kubenswrapper[4995]: E0120 17:15:00.157154 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="extract-utilities" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157164 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="extract-utilities" Jan 20 17:15:00 crc kubenswrapper[4995]: E0120 17:15:00.157184 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="extract-utilities" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157192 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="extract-utilities" Jan 20 17:15:00 crc kubenswrapper[4995]: E0120 17:15:00.157203 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="registry-server" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157210 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="registry-server" Jan 20 17:15:00 crc kubenswrapper[4995]: E0120 17:15:00.157246 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="registry-server" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157253 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="registry-server" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157463 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3959470d-2132-4b10-8158-7264d7327392" containerName="registry-server" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.157479 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5d36177-2cf0-4041-b811-5a4197a18876" containerName="registry-server" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.158319 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.160367 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.165173 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.166117 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv"] Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.256612 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmdx6\" (UniqueName: \"kubernetes.io/projected/5115dbbb-9f37-4041-a81c-e575552102ba-kube-api-access-kmdx6\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.256655 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5115dbbb-9f37-4041-a81c-e575552102ba-config-volume\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.256723 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5115dbbb-9f37-4041-a81c-e575552102ba-secret-volume\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.358862 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmdx6\" (UniqueName: \"kubernetes.io/projected/5115dbbb-9f37-4041-a81c-e575552102ba-kube-api-access-kmdx6\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.358930 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5115dbbb-9f37-4041-a81c-e575552102ba-config-volume\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.359001 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5115dbbb-9f37-4041-a81c-e575552102ba-secret-volume\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.360474 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5115dbbb-9f37-4041-a81c-e575552102ba-config-volume\") pod 
\"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.366591 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5115dbbb-9f37-4041-a81c-e575552102ba-secret-volume\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.377985 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmdx6\" (UniqueName: \"kubernetes.io/projected/5115dbbb-9f37-4041-a81c-e575552102ba-kube-api-access-kmdx6\") pod \"collect-profiles-29482155-hq2zv\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:00 crc kubenswrapper[4995]: I0120 17:15:00.481028 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:01 crc kubenswrapper[4995]: I0120 17:15:01.010913 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv"] Jan 20 17:15:01 crc kubenswrapper[4995]: I0120 17:15:01.312423 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" event={"ID":"5115dbbb-9f37-4041-a81c-e575552102ba","Type":"ContainerStarted","Data":"d19aa1b1040231a473208ec9759952cc059aaadba5b84233b7cdf5c5396e14fa"} Jan 20 17:15:02 crc kubenswrapper[4995]: I0120 17:15:02.325328 4995 generic.go:334] "Generic (PLEG): container finished" podID="5115dbbb-9f37-4041-a81c-e575552102ba" containerID="ec1c4b19d6fb0f822af3bc31fa4ccc39b8335a27accf81a1037d41596b77b6d1" exitCode=0 Jan 20 17:15:02 crc kubenswrapper[4995]: I0120 17:15:02.325443 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" event={"ID":"5115dbbb-9f37-4041-a81c-e575552102ba","Type":"ContainerDied","Data":"ec1c4b19d6fb0f822af3bc31fa4ccc39b8335a27accf81a1037d41596b77b6d1"} Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.713859 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.822504 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5115dbbb-9f37-4041-a81c-e575552102ba-secret-volume\") pod \"5115dbbb-9f37-4041-a81c-e575552102ba\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.822615 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5115dbbb-9f37-4041-a81c-e575552102ba-config-volume\") pod \"5115dbbb-9f37-4041-a81c-e575552102ba\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.822727 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmdx6\" (UniqueName: \"kubernetes.io/projected/5115dbbb-9f37-4041-a81c-e575552102ba-kube-api-access-kmdx6\") pod \"5115dbbb-9f37-4041-a81c-e575552102ba\" (UID: \"5115dbbb-9f37-4041-a81c-e575552102ba\") " Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.823732 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5115dbbb-9f37-4041-a81c-e575552102ba-config-volume" (OuterVolumeSpecName: "config-volume") pod "5115dbbb-9f37-4041-a81c-e575552102ba" (UID: "5115dbbb-9f37-4041-a81c-e575552102ba"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.828887 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5115dbbb-9f37-4041-a81c-e575552102ba-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5115dbbb-9f37-4041-a81c-e575552102ba" (UID: "5115dbbb-9f37-4041-a81c-e575552102ba"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.829679 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5115dbbb-9f37-4041-a81c-e575552102ba-kube-api-access-kmdx6" (OuterVolumeSpecName: "kube-api-access-kmdx6") pod "5115dbbb-9f37-4041-a81c-e575552102ba" (UID: "5115dbbb-9f37-4041-a81c-e575552102ba"). InnerVolumeSpecName "kube-api-access-kmdx6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.924797 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5115dbbb-9f37-4041-a81c-e575552102ba-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.924825 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmdx6\" (UniqueName: \"kubernetes.io/projected/5115dbbb-9f37-4041-a81c-e575552102ba-kube-api-access-kmdx6\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:03 crc kubenswrapper[4995]: I0120 17:15:03.924835 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5115dbbb-9f37-4041-a81c-e575552102ba-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:04 crc kubenswrapper[4995]: I0120 17:15:04.638865 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" event={"ID":"5115dbbb-9f37-4041-a81c-e575552102ba","Type":"ContainerDied","Data":"d19aa1b1040231a473208ec9759952cc059aaadba5b84233b7cdf5c5396e14fa"} Jan 20 17:15:04 crc kubenswrapper[4995]: I0120 17:15:04.639124 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d19aa1b1040231a473208ec9759952cc059aaadba5b84233b7cdf5c5396e14fa" Jan 20 17:15:04 crc kubenswrapper[4995]: I0120 17:15:04.639243 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv" Jan 20 17:15:04 crc kubenswrapper[4995]: I0120 17:15:04.803693 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs"] Jan 20 17:15:04 crc kubenswrapper[4995]: I0120 17:15:04.812719 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482110-4xrhs"] Jan 20 17:15:06 crc kubenswrapper[4995]: I0120 17:15:06.009893 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43165ab8-b1a3-4885-88f6-bc83ef03f454" path="/var/lib/kubelet/pods/43165ab8-b1a3-4885-88f6-bc83ef03f454/volumes" Jan 20 17:15:33 crc kubenswrapper[4995]: I0120 17:15:33.945040 4995 scope.go:117] "RemoveContainer" containerID="34843abfb0b748b41038d6bd4d097fbbf550786c4fb4b98a4cbced79167ba9a7" Jan 20 17:15:47 crc kubenswrapper[4995]: I0120 17:15:47.108105 4995 generic.go:334] "Generic (PLEG): container finished" podID="a463f304-1432-497f-9f19-3cd3b4d05da2" containerID="656ada2ebd93d1994b7e8a487d0ff7fe8cd3f954a65f085bf653fafb7ad2c363" exitCode=0 Jan 20 17:15:47 crc kubenswrapper[4995]: I0120 17:15:47.108303 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" event={"ID":"a463f304-1432-497f-9f19-3cd3b4d05da2","Type":"ContainerDied","Data":"656ada2ebd93d1994b7e8a487d0ff7fe8cd3f954a65f085bf653fafb7ad2c363"} Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.580453 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.652814 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-inventory\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.652897 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-extra-config-0\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.653010 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-1\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.653146 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-0\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.653194 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-0\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.653293 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-1\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.653873 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-ssh-key-openstack-edpm-ipam\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.654400 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvd4m\" (UniqueName: \"kubernetes.io/projected/a463f304-1432-497f-9f19-3cd3b4d05da2-kube-api-access-wvd4m\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.656005 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-combined-ca-bundle\") pod \"a463f304-1432-497f-9f19-3cd3b4d05da2\" (UID: \"a463f304-1432-497f-9f19-3cd3b4d05da2\") " Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.662685 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.662946 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a463f304-1432-497f-9f19-3cd3b4d05da2-kube-api-access-wvd4m" (OuterVolumeSpecName: "kube-api-access-wvd4m") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "kube-api-access-wvd4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.685309 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.686908 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.688788 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.693270 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.694872 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-inventory" (OuterVolumeSpecName: "inventory") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.697286 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.701674 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "a463f304-1432-497f-9f19-3cd3b4d05da2" (UID: "a463f304-1432-497f-9f19-3cd3b4d05da2"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760321 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760346 4995 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760356 4995 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760365 4995 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760376 4995 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760384 4995 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760394 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760401 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvd4m\" (UniqueName: \"kubernetes.io/projected/a463f304-1432-497f-9f19-3cd3b4d05da2-kube-api-access-wvd4m\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:48 crc kubenswrapper[4995]: I0120 17:15:48.760409 4995 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a463f304-1432-497f-9f19-3cd3b4d05da2-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.133293 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" event={"ID":"a463f304-1432-497f-9f19-3cd3b4d05da2","Type":"ContainerDied","Data":"9c59f8994a34399c2d75bd7c9521c4360e2295daf185ab28acfaafe78a1d9196"} Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.133354 4995 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="9c59f8994a34399c2d75bd7c9521c4360e2295daf185ab28acfaafe78a1d9196" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.133440 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-f6vqc" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.240180 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7"] Jan 20 17:15:49 crc kubenswrapper[4995]: E0120 17:15:49.240674 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a463f304-1432-497f-9f19-3cd3b4d05da2" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.240689 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a463f304-1432-497f-9f19-3cd3b4d05da2" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 20 17:15:49 crc kubenswrapper[4995]: E0120 17:15:49.240722 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5115dbbb-9f37-4041-a81c-e575552102ba" containerName="collect-profiles" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.240731 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5115dbbb-9f37-4041-a81c-e575552102ba" containerName="collect-profiles" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.240976 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a463f304-1432-497f-9f19-3cd3b4d05da2" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.240995 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="5115dbbb-9f37-4041-a81c-e575552102ba" containerName="collect-profiles" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.241828 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.249585 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.249957 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.250261 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.250346 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.250436 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-nw6l2" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.258417 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7"] Jan 20 17:15:49 crc kubenswrapper[4995]: E0120 17:15:49.291379 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda463f304_1432_497f_9f19_3cd3b4d05da2.slice/crio-9c59f8994a34399c2d75bd7c9521c4360e2295daf185ab28acfaafe78a1d9196\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda463f304_1432_497f_9f19_3cd3b4d05da2.slice\": RecentStats: unable to find data in memory cache]" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.378416 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.378557 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.378685 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.378738 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: 
\"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.378878 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.379062 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvhjs\" (UniqueName: \"kubernetes.io/projected/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-kube-api-access-jvhjs\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.379134 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481310 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481356 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481377 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481413 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481476 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvhjs\" (UniqueName: 
\"kubernetes.io/projected/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-kube-api-access-jvhjs\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481496 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.481535 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.486701 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.486730 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.486770 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.486990 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.487505 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.487875 4995 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.513782 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvhjs\" (UniqueName: \"kubernetes.io/projected/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-kube-api-access-jvhjs\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:49 crc kubenswrapper[4995]: I0120 17:15:49.569644 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:15:50 crc kubenswrapper[4995]: I0120 17:15:50.185186 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7"] Jan 20 17:15:51 crc kubenswrapper[4995]: I0120 17:15:51.150675 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" event={"ID":"896e00af-dc03-4ed9-b3e7-314eaf50d3b9","Type":"ContainerStarted","Data":"82d7e779c45943793269b7f2b977adbe3eb962f211c21f38b86de451a6358cc1"} Jan 20 17:15:52 crc kubenswrapper[4995]: I0120 17:15:52.160420 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" event={"ID":"896e00af-dc03-4ed9-b3e7-314eaf50d3b9","Type":"ContainerStarted","Data":"5ea9bfc288f950e806171a0b712108300218f8ee5765029b61ca1cb065d14df5"} Jan 20 17:15:52 crc kubenswrapper[4995]: I0120 17:15:52.193883 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" podStartSLOduration=2.423197044 podStartE2EDuration="3.193863121s" podCreationTimestamp="2026-01-20 17:15:49 +0000 UTC" firstStartedPulling="2026-01-20 17:15:50.191473651 +0000 UTC m=+2668.436078457" lastFinishedPulling="2026-01-20 17:15:50.962139718 +0000 UTC m=+2669.206744534" observedRunningTime="2026-01-20 17:15:52.18087506 +0000 UTC m=+2670.425479876" watchObservedRunningTime="2026-01-20 17:15:52.193863121 +0000 UTC m=+2670.438467927" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.581143 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5m44h"] Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.588517 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.613551 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5m44h"] Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.685493 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-catalog-content\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.685962 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbvmj\" (UniqueName: \"kubernetes.io/projected/f99b082a-e738-409e-8e0c-1aa209231caf-kube-api-access-tbvmj\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.686002 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-utilities\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.788091 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbvmj\" (UniqueName: \"kubernetes.io/projected/f99b082a-e738-409e-8e0c-1aa209231caf-kube-api-access-tbvmj\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.788359 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-utilities\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.788523 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-catalog-content\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.788969 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-utilities\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.788988 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-catalog-content\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.807415 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tbvmj\" (UniqueName: \"kubernetes.io/projected/f99b082a-e738-409e-8e0c-1aa209231caf-kube-api-access-tbvmj\") pod \"redhat-operators-5m44h\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:58 crc kubenswrapper[4995]: I0120 17:15:58.916421 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:15:59 crc kubenswrapper[4995]: I0120 17:15:59.389924 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5m44h"] Jan 20 17:16:00 crc kubenswrapper[4995]: I0120 17:16:00.280039 4995 generic.go:334] "Generic (PLEG): container finished" podID="f99b082a-e738-409e-8e0c-1aa209231caf" containerID="00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac" exitCode=0 Jan 20 17:16:00 crc kubenswrapper[4995]: I0120 17:16:00.280238 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m44h" event={"ID":"f99b082a-e738-409e-8e0c-1aa209231caf","Type":"ContainerDied","Data":"00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac"} Jan 20 17:16:00 crc kubenswrapper[4995]: I0120 17:16:00.280360 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m44h" event={"ID":"f99b082a-e738-409e-8e0c-1aa209231caf","Type":"ContainerStarted","Data":"8f5838e2d85f66f0b1139abe474b8640de57c01cb92371257975b941e24d43fe"} Jan 20 17:16:00 crc kubenswrapper[4995]: I0120 17:16:00.572066 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:16:00 crc kubenswrapper[4995]: I0120 17:16:00.572147 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:16:02 crc kubenswrapper[4995]: I0120 17:16:02.301390 4995 generic.go:334] "Generic (PLEG): container finished" podID="f99b082a-e738-409e-8e0c-1aa209231caf" containerID="840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8" exitCode=0 Jan 20 17:16:02 crc kubenswrapper[4995]: I0120 17:16:02.302117 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m44h" event={"ID":"f99b082a-e738-409e-8e0c-1aa209231caf","Type":"ContainerDied","Data":"840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8"} Jan 20 17:16:04 crc kubenswrapper[4995]: I0120 17:16:04.323225 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m44h" event={"ID":"f99b082a-e738-409e-8e0c-1aa209231caf","Type":"ContainerStarted","Data":"bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279"} Jan 20 17:16:04 crc kubenswrapper[4995]: I0120 17:16:04.343636 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5m44h" podStartSLOduration=3.49040097 podStartE2EDuration="6.343620629s" podCreationTimestamp="2026-01-20 17:15:58 +0000 UTC" firstStartedPulling="2026-01-20 17:16:00.283966153 +0000 UTC m=+2678.528570969" 
lastFinishedPulling="2026-01-20 17:16:03.137185822 +0000 UTC m=+2681.381790628" observedRunningTime="2026-01-20 17:16:04.341033108 +0000 UTC m=+2682.585637914" watchObservedRunningTime="2026-01-20 17:16:04.343620629 +0000 UTC m=+2682.588225435" Jan 20 17:16:08 crc kubenswrapper[4995]: I0120 17:16:08.917514 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:16:08 crc kubenswrapper[4995]: I0120 17:16:08.917927 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:16:09 crc kubenswrapper[4995]: I0120 17:16:09.985801 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5m44h" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="registry-server" probeResult="failure" output=< Jan 20 17:16:09 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 17:16:09 crc kubenswrapper[4995]: > Jan 20 17:16:18 crc kubenswrapper[4995]: I0120 17:16:18.991231 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:16:19 crc kubenswrapper[4995]: I0120 17:16:19.061306 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:16:19 crc kubenswrapper[4995]: I0120 17:16:19.241055 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5m44h"] Jan 20 17:16:20 crc kubenswrapper[4995]: I0120 17:16:20.485356 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5m44h" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="registry-server" containerID="cri-o://bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279" gracePeriod=2 Jan 20 17:16:20 crc kubenswrapper[4995]: I0120 17:16:20.939442 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.066556 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-utilities\") pod \"f99b082a-e738-409e-8e0c-1aa209231caf\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.066758 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-catalog-content\") pod \"f99b082a-e738-409e-8e0c-1aa209231caf\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.066799 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbvmj\" (UniqueName: \"kubernetes.io/projected/f99b082a-e738-409e-8e0c-1aa209231caf-kube-api-access-tbvmj\") pod \"f99b082a-e738-409e-8e0c-1aa209231caf\" (UID: \"f99b082a-e738-409e-8e0c-1aa209231caf\") " Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.067420 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-utilities" (OuterVolumeSpecName: "utilities") pod "f99b082a-e738-409e-8e0c-1aa209231caf" (UID: "f99b082a-e738-409e-8e0c-1aa209231caf"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.073280 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f99b082a-e738-409e-8e0c-1aa209231caf-kube-api-access-tbvmj" (OuterVolumeSpecName: "kube-api-access-tbvmj") pod "f99b082a-e738-409e-8e0c-1aa209231caf" (UID: "f99b082a-e738-409e-8e0c-1aa209231caf"). InnerVolumeSpecName "kube-api-access-tbvmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.169242 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.169274 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbvmj\" (UniqueName: \"kubernetes.io/projected/f99b082a-e738-409e-8e0c-1aa209231caf-kube-api-access-tbvmj\") on node \"crc\" DevicePath \"\"" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.171505 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f99b082a-e738-409e-8e0c-1aa209231caf" (UID: "f99b082a-e738-409e-8e0c-1aa209231caf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.270806 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f99b082a-e738-409e-8e0c-1aa209231caf-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.497274 4995 generic.go:334] "Generic (PLEG): container finished" podID="f99b082a-e738-409e-8e0c-1aa209231caf" containerID="bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279" exitCode=0 Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.497362 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5m44h" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.497365 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m44h" event={"ID":"f99b082a-e738-409e-8e0c-1aa209231caf","Type":"ContainerDied","Data":"bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279"} Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.499765 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5m44h" event={"ID":"f99b082a-e738-409e-8e0c-1aa209231caf","Type":"ContainerDied","Data":"8f5838e2d85f66f0b1139abe474b8640de57c01cb92371257975b941e24d43fe"} Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.499800 4995 scope.go:117] "RemoveContainer" containerID="bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.524149 4995 scope.go:117] "RemoveContainer" containerID="840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.546647 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5m44h"] Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.555592 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5m44h"] Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.574149 4995 scope.go:117] "RemoveContainer" containerID="00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.609864 4995 scope.go:117] "RemoveContainer" containerID="bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279" Jan 20 17:16:21 crc kubenswrapper[4995]: E0120 17:16:21.610328 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279\": container with ID starting with bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279 not found: ID does not exist" containerID="bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.610371 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279"} err="failed to get container status \"bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279\": rpc error: code = NotFound desc = could not find container \"bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279\": container with ID starting with bd05ad1c93113ed07e96df81f1ab6493dd79bb9ee7451d5581871ccda3c2d279 not found: ID does not exist" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.610397 4995 scope.go:117] "RemoveContainer" containerID="840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8" Jan 20 17:16:21 crc kubenswrapper[4995]: E0120 17:16:21.610889 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8\": container with ID starting with 840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8 not found: ID does not exist" containerID="840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.610932 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8"} err="failed to get container status \"840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8\": rpc error: code = NotFound desc = could not find container \"840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8\": container with ID starting with 840fda6e0c87152b03d7d5b71bae15229b57c3216a143de45d578024d2cc31d8 not found: ID does not exist" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.610962 4995 scope.go:117] "RemoveContainer" containerID="00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac" Jan 20 17:16:21 crc kubenswrapper[4995]: E0120 17:16:21.611407 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac\": container with ID starting with 00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac not found: ID does not exist" containerID="00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac" Jan 20 17:16:21 crc kubenswrapper[4995]: I0120 17:16:21.611432 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac"} err="failed to get container status \"00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac\": rpc error: code = NotFound desc = could not find container \"00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac\": container with ID starting with 00f362872c01a9c59772774c302cc491e717de9ec5510af97d2487bbaa0313ac not found: ID does not exist" Jan 20 17:16:22 crc kubenswrapper[4995]: I0120 17:16:22.000477 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" path="/var/lib/kubelet/pods/f99b082a-e738-409e-8e0c-1aa209231caf/volumes" Jan 20 17:16:30 crc kubenswrapper[4995]: I0120 17:16:30.572425 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:16:30 crc kubenswrapper[4995]: I0120 17:16:30.573010 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.571757 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.572571 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 
17:17:00.572642 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.573542 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b1a719c573a74fe67fc6e576c604a61c3db39096bff0ed702a9e56ffca19d5c9"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.573648 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://b1a719c573a74fe67fc6e576c604a61c3db39096bff0ed702a9e56ffca19d5c9" gracePeriod=600 Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.856254 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="b1a719c573a74fe67fc6e576c604a61c3db39096bff0ed702a9e56ffca19d5c9" exitCode=0 Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.856339 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"b1a719c573a74fe67fc6e576c604a61c3db39096bff0ed702a9e56ffca19d5c9"} Jan 20 17:17:00 crc kubenswrapper[4995]: I0120 17:17:00.857187 4995 scope.go:117] "RemoveContainer" containerID="1c4f7a447893ff4f4faf727dad65d21f2f2e0a74fbf3df19c535419d0223c0dd" Jan 20 17:17:01 crc kubenswrapper[4995]: I0120 17:17:01.866431 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"} Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.425755 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b8ld8"] Jan 20 17:17:30 crc kubenswrapper[4995]: E0120 17:17:30.427111 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="registry-server" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.427135 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="registry-server" Jan 20 17:17:30 crc kubenswrapper[4995]: E0120 17:17:30.427186 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="extract-content" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.427198 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="extract-content" Jan 20 17:17:30 crc kubenswrapper[4995]: E0120 17:17:30.427218 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="extract-utilities" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.427230 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="extract-utilities" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.427488 4995 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="f99b082a-e738-409e-8e0c-1aa209231caf" containerName="registry-server" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.429606 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.450065 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b8ld8"] Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.529275 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-utilities\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.529331 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8bg8\" (UniqueName: \"kubernetes.io/projected/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-kube-api-access-z8bg8\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.529591 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-catalog-content\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.630876 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-catalog-content\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.630994 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-utilities\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.631017 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8bg8\" (UniqueName: \"kubernetes.io/projected/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-kube-api-access-z8bg8\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.631532 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-catalog-content\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.631542 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-utilities\") pod 
\"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.657859 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8bg8\" (UniqueName: \"kubernetes.io/projected/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-kube-api-access-z8bg8\") pod \"community-operators-b8ld8\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:30 crc kubenswrapper[4995]: I0120 17:17:30.763423 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:31 crc kubenswrapper[4995]: I0120 17:17:31.284706 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b8ld8"] Jan 20 17:17:32 crc kubenswrapper[4995]: I0120 17:17:32.186113 4995 generic.go:334] "Generic (PLEG): container finished" podID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerID="fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df" exitCode=0 Jan 20 17:17:32 crc kubenswrapper[4995]: I0120 17:17:32.186534 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b8ld8" event={"ID":"dbe718ac-d53d-435c-80d5-fc33e22ee3f2","Type":"ContainerDied","Data":"fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df"} Jan 20 17:17:32 crc kubenswrapper[4995]: I0120 17:17:32.186563 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b8ld8" event={"ID":"dbe718ac-d53d-435c-80d5-fc33e22ee3f2","Type":"ContainerStarted","Data":"7cbaa68cf0375f494537513aec748a1d338b5c169a09d63671cd0aa116996650"} Jan 20 17:17:34 crc kubenswrapper[4995]: I0120 17:17:34.206581 4995 generic.go:334] "Generic (PLEG): container finished" podID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerID="b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595" exitCode=0 Jan 20 17:17:34 crc kubenswrapper[4995]: I0120 17:17:34.206672 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b8ld8" event={"ID":"dbe718ac-d53d-435c-80d5-fc33e22ee3f2","Type":"ContainerDied","Data":"b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595"} Jan 20 17:17:35 crc kubenswrapper[4995]: I0120 17:17:35.223841 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b8ld8" event={"ID":"dbe718ac-d53d-435c-80d5-fc33e22ee3f2","Type":"ContainerStarted","Data":"ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c"} Jan 20 17:17:35 crc kubenswrapper[4995]: I0120 17:17:35.239115 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b8ld8" podStartSLOduration=2.686466814 podStartE2EDuration="5.239068783s" podCreationTimestamp="2026-01-20 17:17:30 +0000 UTC" firstStartedPulling="2026-01-20 17:17:32.191312216 +0000 UTC m=+2770.435917052" lastFinishedPulling="2026-01-20 17:17:34.743914215 +0000 UTC m=+2772.988519021" observedRunningTime="2026-01-20 17:17:35.238442646 +0000 UTC m=+2773.483047462" watchObservedRunningTime="2026-01-20 17:17:35.239068783 +0000 UTC m=+2773.483673589" Jan 20 17:17:40 crc kubenswrapper[4995]: I0120 17:17:40.764470 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:40 crc 
kubenswrapper[4995]: I0120 17:17:40.764987 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:40 crc kubenswrapper[4995]: I0120 17:17:40.838308 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:41 crc kubenswrapper[4995]: I0120 17:17:41.330451 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:41 crc kubenswrapper[4995]: I0120 17:17:41.378051 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b8ld8"] Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.311691 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b8ld8" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="registry-server" containerID="cri-o://ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c" gracePeriod=2 Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.842911 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.847467 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-catalog-content\") pod \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.847673 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-utilities\") pod \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.848115 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8bg8\" (UniqueName: \"kubernetes.io/projected/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-kube-api-access-z8bg8\") pod \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\" (UID: \"dbe718ac-d53d-435c-80d5-fc33e22ee3f2\") " Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.848456 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-utilities" (OuterVolumeSpecName: "utilities") pod "dbe718ac-d53d-435c-80d5-fc33e22ee3f2" (UID: "dbe718ac-d53d-435c-80d5-fc33e22ee3f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.848964 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.854954 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-kube-api-access-z8bg8" (OuterVolumeSpecName: "kube-api-access-z8bg8") pod "dbe718ac-d53d-435c-80d5-fc33e22ee3f2" (UID: "dbe718ac-d53d-435c-80d5-fc33e22ee3f2"). InnerVolumeSpecName "kube-api-access-z8bg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.914263 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbe718ac-d53d-435c-80d5-fc33e22ee3f2" (UID: "dbe718ac-d53d-435c-80d5-fc33e22ee3f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.950785 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8bg8\" (UniqueName: \"kubernetes.io/projected/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-kube-api-access-z8bg8\") on node \"crc\" DevicePath \"\"" Jan 20 17:17:43 crc kubenswrapper[4995]: I0120 17:17:43.950822 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbe718ac-d53d-435c-80d5-fc33e22ee3f2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.339697 4995 generic.go:334] "Generic (PLEG): container finished" podID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerID="ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c" exitCode=0 Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.339839 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b8ld8" event={"ID":"dbe718ac-d53d-435c-80d5-fc33e22ee3f2","Type":"ContainerDied","Data":"ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c"} Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.339879 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b8ld8" event={"ID":"dbe718ac-d53d-435c-80d5-fc33e22ee3f2","Type":"ContainerDied","Data":"7cbaa68cf0375f494537513aec748a1d338b5c169a09d63671cd0aa116996650"} Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.339932 4995 scope.go:117] "RemoveContainer" containerID="ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.340384 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b8ld8" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.392198 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b8ld8"] Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.395206 4995 scope.go:117] "RemoveContainer" containerID="b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.403007 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b8ld8"] Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.423872 4995 scope.go:117] "RemoveContainer" containerID="fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.465162 4995 scope.go:117] "RemoveContainer" containerID="ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c" Jan 20 17:17:44 crc kubenswrapper[4995]: E0120 17:17:44.465745 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c\": container with ID starting with ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c not found: ID does not exist" containerID="ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.465821 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c"} err="failed to get container status \"ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c\": rpc error: code = NotFound desc = could not find container \"ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c\": container with ID starting with ace7a1933995fa8ce43734306e078ebdebf02e11cfc44e6d95f7931262e0ee7c not found: ID does not exist" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.465864 4995 scope.go:117] "RemoveContainer" containerID="b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595" Jan 20 17:17:44 crc kubenswrapper[4995]: E0120 17:17:44.466422 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595\": container with ID starting with b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595 not found: ID does not exist" containerID="b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.466480 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595"} err="failed to get container status \"b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595\": rpc error: code = NotFound desc = could not find container \"b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595\": container with ID starting with b43a1d600664520289617aee96328de4efb7955ff4b6fdfce232468d1a5e2595 not found: ID does not exist" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.466512 4995 scope.go:117] "RemoveContainer" containerID="fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df" Jan 20 17:17:44 crc kubenswrapper[4995]: E0120 17:17:44.467112 4995 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df\": container with ID starting with fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df not found: ID does not exist" containerID="fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df" Jan 20 17:17:44 crc kubenswrapper[4995]: I0120 17:17:44.467160 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df"} err="failed to get container status \"fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df\": rpc error: code = NotFound desc = could not find container \"fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df\": container with ID starting with fe6d3dd57ae48978be0f0abe0c2bbe575c55b581beebf772c7709ffdbf6dd3df not found: ID does not exist" Jan 20 17:17:46 crc kubenswrapper[4995]: I0120 17:17:46.002613 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" path="/var/lib/kubelet/pods/dbe718ac-d53d-435c-80d5-fc33e22ee3f2/volumes" Jan 20 17:18:14 crc kubenswrapper[4995]: I0120 17:18:14.663597 4995 generic.go:334] "Generic (PLEG): container finished" podID="896e00af-dc03-4ed9-b3e7-314eaf50d3b9" containerID="5ea9bfc288f950e806171a0b712108300218f8ee5765029b61ca1cb065d14df5" exitCode=0 Jan 20 17:18:14 crc kubenswrapper[4995]: I0120 17:18:14.663667 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" event={"ID":"896e00af-dc03-4ed9-b3e7-314eaf50d3b9","Type":"ContainerDied","Data":"5ea9bfc288f950e806171a0b712108300218f8ee5765029b61ca1cb065d14df5"} Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.148925 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.217914 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ssh-key-openstack-edpm-ipam\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.218019 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-2\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.218061 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-0\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.218247 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-inventory\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.218513 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-1\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.218556 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-telemetry-combined-ca-bundle\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.218592 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvhjs\" (UniqueName: \"kubernetes.io/projected/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-kube-api-access-jvhjs\") pod \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\" (UID: \"896e00af-dc03-4ed9-b3e7-314eaf50d3b9\") " Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.224303 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-kube-api-access-jvhjs" (OuterVolumeSpecName: "kube-api-access-jvhjs") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). InnerVolumeSpecName "kube-api-access-jvhjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.224908 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.252896 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.258881 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-inventory" (OuterVolumeSpecName: "inventory") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.262780 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.262906 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.283733 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "896e00af-dc03-4ed9-b3e7-314eaf50d3b9" (UID: "896e00af-dc03-4ed9-b3e7-314eaf50d3b9"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.322131 4995 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.322267 4995 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.322351 4995 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.322762 4995 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.322905 4995 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.322994 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvhjs\" (UniqueName: \"kubernetes.io/projected/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-kube-api-access-jvhjs\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.323088 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/896e00af-dc03-4ed9-b3e7-314eaf50d3b9-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.686867 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" event={"ID":"896e00af-dc03-4ed9-b3e7-314eaf50d3b9","Type":"ContainerDied","Data":"82d7e779c45943793269b7f2b977adbe3eb962f211c21f38b86de451a6358cc1"} Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.686912 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82d7e779c45943793269b7f2b977adbe3eb962f211c21f38b86de451a6358cc1" Jan 20 17:18:16 crc kubenswrapper[4995]: I0120 17:18:16.686976 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7" Jan 20 17:18:56 crc kubenswrapper[4995]: I0120 17:18:56.389733 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 17:18:56 crc kubenswrapper[4995]: I0120 17:18:56.390468 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="prometheus" containerID="cri-o://b6a446f2b1cb31d2aec998fcd6c1d6cfa56fd69253133b3c554dd530c0ed8bd1" gracePeriod=600 Jan 20 17:18:56 crc kubenswrapper[4995]: I0120 17:18:56.390775 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="thanos-sidecar" containerID="cri-o://dd48c16250e4e399de598759fcf60283ca4ba1c2d9f854a1f5c0207e0bd864c7" gracePeriod=600 Jan 20 17:18:56 crc kubenswrapper[4995]: I0120 17:18:56.391644 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="config-reloader" containerID="cri-o://08c4e805f0ea901d7a17afed9a299e1055337412cd83d4f2937427c65194932b" gracePeriod=600 Jan 20 17:18:56 crc kubenswrapper[4995]: I0120 17:18:56.916246 4995 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.144:9090/-/ready\": dial tcp 10.217.0.144:9090: connect: connection refused" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.133372 4995 generic.go:334] "Generic (PLEG): container finished" podID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerID="dd48c16250e4e399de598759fcf60283ca4ba1c2d9f854a1f5c0207e0bd864c7" exitCode=0 Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.133403 4995 generic.go:334] "Generic (PLEG): container finished" podID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerID="08c4e805f0ea901d7a17afed9a299e1055337412cd83d4f2937427c65194932b" exitCode=0 Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.133411 4995 generic.go:334] "Generic (PLEG): container finished" podID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerID="b6a446f2b1cb31d2aec998fcd6c1d6cfa56fd69253133b3c554dd530c0ed8bd1" exitCode=0 Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.133451 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerDied","Data":"dd48c16250e4e399de598759fcf60283ca4ba1c2d9f854a1f5c0207e0bd864c7"} Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.133492 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerDied","Data":"08c4e805f0ea901d7a17afed9a299e1055337412cd83d4f2937427c65194932b"} Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.133503 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerDied","Data":"b6a446f2b1cb31d2aec998fcd6c1d6cfa56fd69253133b3c554dd530c0ed8bd1"} Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.480768 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.615287 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-2\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.615727 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-thanos-prometheus-http-client-file\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.615766 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ff6bba1f-8556-411d-bba9-b0274703ffea-config-out\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.615793 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.615809 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.615824 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-config\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616059 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-0\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616181 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-1\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616212 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616259 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-secret-combined-ca-bundle\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616367 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28k5z\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-kube-api-access-28k5z\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616415 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-tls-assets\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616471 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.616570 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"ff6bba1f-8556-411d-bba9-b0274703ffea\" (UID: \"ff6bba1f-8556-411d-bba9-b0274703ffea\") " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.617054 4995 reconciler_common.go:293] 
"Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.617045 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.619557 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.623035 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.623105 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.624084 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.625201 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.627388 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-config" (OuterVolumeSpecName: "config") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.627425 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-kube-api-access-28k5z" (OuterVolumeSpecName: "kube-api-access-28k5z") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "kube-api-access-28k5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.627391 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff6bba1f-8556-411d-bba9-b0274703ffea-config-out" (OuterVolumeSpecName: "config-out") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.635208 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.659326 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719446 4995 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719494 4995 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/ff6bba1f-8556-411d-bba9-b0274703ffea-config-out\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719504 4995 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719515 4995 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-config\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719527 4995 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719535 4995 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/ff6bba1f-8556-411d-bba9-b0274703ffea-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719545 4995 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719572 4995 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719582 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28k5z\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-kube-api-access-28k5z\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719589 4995 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/ff6bba1f-8556-411d-bba9-b0274703ffea-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.719618 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") on node \"crc\" " Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.723506 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config" (OuterVolumeSpecName: "web-config") pod "ff6bba1f-8556-411d-bba9-b0274703ffea" (UID: "ff6bba1f-8556-411d-bba9-b0274703ffea"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.749412 4995 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.749569 4995 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f") on node "crc" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.821951 4995 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/ff6bba1f-8556-411d-bba9-b0274703ffea-web-config\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:57 crc kubenswrapper[4995]: I0120 17:18:57.821995 4995 reconciler_common.go:293] "Volume detached for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") on node \"crc\" DevicePath \"\"" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.145774 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"ff6bba1f-8556-411d-bba9-b0274703ffea","Type":"ContainerDied","Data":"cc12bb5667e9b3460aa9afe50fd053766d71bc6557fc5b07f5ea4dd7c1bb72fb"} Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.145848 4995 scope.go:117] "RemoveContainer" containerID="dd48c16250e4e399de598759fcf60283ca4ba1c2d9f854a1f5c0207e0bd864c7" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.145856 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.167368 4995 scope.go:117] "RemoveContainer" containerID="08c4e805f0ea901d7a17afed9a299e1055337412cd83d4f2937427c65194932b" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.174549 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.185190 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204338 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204714 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="config-reloader" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204725 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="config-reloader" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204735 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="extract-utilities" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204741 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="extract-utilities" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204750 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="registry-server" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204756 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="registry-server" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204768 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896e00af-dc03-4ed9-b3e7-314eaf50d3b9" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204774 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="896e00af-dc03-4ed9-b3e7-314eaf50d3b9" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204788 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="prometheus" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204795 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="prometheus" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204805 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="init-config-reloader" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204811 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="init-config-reloader" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204817 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="extract-content" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204823 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" 
containerName="extract-content" Jan 20 17:18:58 crc kubenswrapper[4995]: E0120 17:18:58.204837 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="thanos-sidecar" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.204843 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="thanos-sidecar" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.205009 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="config-reloader" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.205028 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="prometheus" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.205039 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbe718ac-d53d-435c-80d5-fc33e22ee3f2" containerName="registry-server" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.205049 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" containerName="thanos-sidecar" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.205061 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="896e00af-dc03-4ed9-b3e7-314eaf50d3b9" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.206814 4995 scope.go:117] "RemoveContainer" containerID="b6a446f2b1cb31d2aec998fcd6c1d6cfa56fd69253133b3c554dd530c0ed8bd1" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.207036 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.212722 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-58l4k" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.212919 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.212949 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.213129 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.213223 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.213358 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.214308 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.220301 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.223224 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.258255 4995 scope.go:117] 
"RemoveContainer" containerID="eea83ddf100f67421593176e2e56fe13438674fd192650c7c0c5a223a509466e" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331482 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331544 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331633 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331686 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbmz4\" (UniqueName: \"kubernetes.io/projected/69015998-1253-4181-99d4-1ea45f6ca788-kube-api-access-sbmz4\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331720 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/69015998-1253-4181-99d4-1ea45f6ca788-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331860 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.331914 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.332146 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" 
(UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.332208 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.332290 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.332315 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-config\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.332337 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.332461 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/69015998-1253-4181-99d4-1ea45f6ca788-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.433950 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434278 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434423 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " 
pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434525 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434619 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434694 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-config\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434656 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434769 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434934 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/69015998-1253-4181-99d4-1ea45f6ca788-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.434968 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.435000 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.435044 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.435060 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.435119 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbmz4\" (UniqueName: \"kubernetes.io/projected/69015998-1253-4181-99d4-1ea45f6ca788-kube-api-access-sbmz4\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.435158 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/69015998-1253-4181-99d4-1ea45f6ca788-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.435604 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/69015998-1253-4181-99d4-1ea45f6ca788-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.450468 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/69015998-1253-4181-99d4-1ea45f6ca788-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.451396 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.451423 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-config\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.451839 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/69015998-1253-4181-99d4-1ea45f6ca788-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.452208 4995 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. 
Skipping MountDevice... Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.452244 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a3a36306f7d3d2f24937466925b0b10e100df05e864ec7bc951230e86c72f354/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.452956 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.453058 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.457457 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.459679 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbmz4\" (UniqueName: \"kubernetes.io/projected/69015998-1253-4181-99d4-1ea45f6ca788-kube-api-access-sbmz4\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.461822 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69015998-1253-4181-99d4-1ea45f6ca788-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.582472 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d6b251ba-258b-43bc-b7e0-0c1d4a98ee5f\") pod \"prometheus-metric-storage-0\" (UID: \"69015998-1253-4181-99d4-1ea45f6ca788\") " pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:58 crc kubenswrapper[4995]: I0120 17:18:58.836693 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 20 17:18:59 crc kubenswrapper[4995]: I0120 17:18:59.327545 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 20 17:19:00 crc kubenswrapper[4995]: I0120 17:19:00.006686 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff6bba1f-8556-411d-bba9-b0274703ffea" path="/var/lib/kubelet/pods/ff6bba1f-8556-411d-bba9-b0274703ffea/volumes" Jan 20 17:19:00 crc kubenswrapper[4995]: I0120 17:19:00.167973 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"69015998-1253-4181-99d4-1ea45f6ca788","Type":"ContainerStarted","Data":"1563d49ba9d42d9182bc94a5b3fd2f2327fbf13e1820f6df8cb3d9166145cda9"} Jan 20 17:19:00 crc kubenswrapper[4995]: I0120 17:19:00.571677 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:19:00 crc kubenswrapper[4995]: I0120 17:19:00.571729 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:19:03 crc kubenswrapper[4995]: I0120 17:19:03.193628 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"69015998-1253-4181-99d4-1ea45f6ca788","Type":"ContainerStarted","Data":"d67708cabecbbf15d82ed3e612228fb57d1de50f87ab3c8992b1f5ea48baf509"} Jan 20 17:19:09 crc kubenswrapper[4995]: I0120 17:19:09.249874 4995 generic.go:334] "Generic (PLEG): container finished" podID="69015998-1253-4181-99d4-1ea45f6ca788" containerID="d67708cabecbbf15d82ed3e612228fb57d1de50f87ab3c8992b1f5ea48baf509" exitCode=0 Jan 20 17:19:09 crc kubenswrapper[4995]: I0120 17:19:09.249985 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"69015998-1253-4181-99d4-1ea45f6ca788","Type":"ContainerDied","Data":"d67708cabecbbf15d82ed3e612228fb57d1de50f87ab3c8992b1f5ea48baf509"} Jan 20 17:19:10 crc kubenswrapper[4995]: I0120 17:19:10.277183 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"69015998-1253-4181-99d4-1ea45f6ca788","Type":"ContainerStarted","Data":"518b9501df35e3ba4a73d55fcba44910c3e09f731c1b94cf328658ae6329f6e4"} Jan 20 17:19:14 crc kubenswrapper[4995]: I0120 17:19:14.314368 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"69015998-1253-4181-99d4-1ea45f6ca788","Type":"ContainerStarted","Data":"8707cd41d2786f63367a8b41147d05b41abd0cd288f8cdd9c6010376320f17f2"} Jan 20 17:19:14 crc kubenswrapper[4995]: I0120 17:19:14.314950 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"69015998-1253-4181-99d4-1ea45f6ca788","Type":"ContainerStarted","Data":"21bc4d530e5d8ff0dee7fc4566759d1dd172a35560118b6316e4e19f5b4bde65"} Jan 20 17:19:14 crc kubenswrapper[4995]: I0120 17:19:14.378977 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" 
podStartSLOduration=16.378951918 podStartE2EDuration="16.378951918s" podCreationTimestamp="2026-01-20 17:18:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 17:19:14.351633299 +0000 UTC m=+2872.596238125" watchObservedRunningTime="2026-01-20 17:19:14.378951918 +0000 UTC m=+2872.623556724" Jan 20 17:19:18 crc kubenswrapper[4995]: I0120 17:19:18.837972 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 20 17:19:28 crc kubenswrapper[4995]: I0120 17:19:28.837811 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 20 17:19:28 crc kubenswrapper[4995]: I0120 17:19:28.847688 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 20 17:19:29 crc kubenswrapper[4995]: I0120 17:19:29.467383 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 20 17:19:30 crc kubenswrapper[4995]: I0120 17:19:30.572156 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:19:30 crc kubenswrapper[4995]: I0120 17:19:30.572227 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.888442 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.891415 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.894343 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8fpfv" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.894827 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.894959 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.896911 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.898474 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.996394 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.996536 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.996737 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.996897 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.997055 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.997137 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.997172 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qz95\" 
(UniqueName: \"kubernetes.io/projected/356ca6c0-8604-40b3-b965-af9225ea185f-kube-api-access-4qz95\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.997205 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:35 crc kubenswrapper[4995]: I0120 17:19:35.997236 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-config-data\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099224 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099368 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099400 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099449 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099540 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099566 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099585 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qz95\" (UniqueName: 
\"kubernetes.io/projected/356ca6c0-8604-40b3-b965-af9225ea185f-kube-api-access-4qz95\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099607 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.099626 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-config-data\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.101139 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.101458 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.101590 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.101597 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.101782 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-config-data\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.105382 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.105677 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " 
pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.109923 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.127318 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qz95\" (UniqueName: \"kubernetes.io/projected/356ca6c0-8604-40b3-b965-af9225ea185f-kube-api-access-4qz95\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.130458 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") " pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.222392 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.690389 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 20 17:19:36 crc kubenswrapper[4995]: W0120 17:19:36.691266 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod356ca6c0_8604_40b3_b965_af9225ea185f.slice/crio-26e68232d06f08bf7613e032f7be9ff0c9d70928c357fe98f9009b2f14d731f0 WatchSource:0}: Error finding container 26e68232d06f08bf7613e032f7be9ff0c9d70928c357fe98f9009b2f14d731f0: Status 404 returned error can't find the container with id 26e68232d06f08bf7613e032f7be9ff0c9d70928c357fe98f9009b2f14d731f0 Jan 20 17:19:36 crc kubenswrapper[4995]: I0120 17:19:36.693974 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 17:19:37 crc kubenswrapper[4995]: I0120 17:19:37.536572 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"356ca6c0-8604-40b3-b965-af9225ea185f","Type":"ContainerStarted","Data":"26e68232d06f08bf7613e032f7be9ff0c9d70928c357fe98f9009b2f14d731f0"} Jan 20 17:19:48 crc kubenswrapper[4995]: I0120 17:19:48.656623 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"356ca6c0-8604-40b3-b965-af9225ea185f","Type":"ContainerStarted","Data":"64a19a3fdb566ad3aa799b7509530c884fe8a53b2a5309785a02e13e44cdeb25"} Jan 20 17:19:48 crc kubenswrapper[4995]: I0120 17:19:48.681442 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.721116093 podStartE2EDuration="14.681401763s" podCreationTimestamp="2026-01-20 17:19:34 +0000 UTC" firstStartedPulling="2026-01-20 17:19:36.693318873 +0000 UTC m=+2894.937923679" lastFinishedPulling="2026-01-20 17:19:47.653604533 +0000 UTC m=+2905.898209349" observedRunningTime="2026-01-20 17:19:48.673569321 +0000 UTC m=+2906.918174147" watchObservedRunningTime="2026-01-20 17:19:48.681401763 +0000 UTC m=+2906.926006569" Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.571616 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon 
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.571616 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.572202 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.572260 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.573125 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.573193 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06" gracePeriod=600
Jan 20 17:20:00 crc kubenswrapper[4995]: E0120 17:20:00.699747 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.805682 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06" exitCode=0
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.805726 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"}
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.805767 4995 scope.go:117] "RemoveContainer" containerID="b1a719c573a74fe67fc6e576c604a61c3db39096bff0ed702a9e56ffca19d5c9"
Jan 20 17:20:00 crc kubenswrapper[4995]: I0120 17:20:00.806586 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:20:00 crc kubenswrapper[4995]: E0120 17:20:00.806909 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
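The probe output above is "connect: connection refused" on 127.0.0.1:8798, meaning nothing was listening on the health port at all, rather than an unhealthy answer. For reference, a minimal Go endpoint that would satisfy an HTTP liveness probe of this shape; path and address are taken from the log, and the handler is a hypothetical stand-in for the daemon's real check:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Path and address taken from the probe output above; the handler
	// itself is a hypothetical stand-in for the real health check.
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, "ok")
	})
	// If this listener is not up, the kubelet's HTTP probe sees exactly the
	// "dial tcp 127.0.0.1:8798: connect: connection refused" logged here.
	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil))
}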
Jan 20 17:20:14 crc kubenswrapper[4995]: I0120 17:20:14.989777 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:20:14 crc kubenswrapper[4995]: E0120 17:20:14.991063 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:20:29 crc kubenswrapper[4995]: I0120 17:20:29.989587 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:20:29 crc kubenswrapper[4995]: E0120 17:20:29.990335 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:20:44 crc kubenswrapper[4995]: I0120 17:20:44.989663 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:20:44 crc kubenswrapper[4995]: E0120 17:20:44.991054 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:20:55 crc kubenswrapper[4995]: I0120 17:20:55.990857 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:20:55 crc kubenswrapper[4995]: E0120 17:20:55.991700 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:21:07 crc kubenswrapper[4995]: I0120 17:21:07.989398 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:21:07 crc kubenswrapper[4995]: E0120 17:21:07.990378 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:21:20 crc kubenswrapper[4995]: I0120 17:21:20.989606 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:21:20 crc kubenswrapper[4995]: E0120 17:21:20.990415 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:21:34 crc kubenswrapper[4995]: I0120 17:21:34.990319 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:21:34 crc kubenswrapper[4995]: E0120 17:21:34.991291 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:21:47 crc kubenswrapper[4995]: I0120 17:21:47.990284 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:21:47 crc kubenswrapper[4995]: E0120 17:21:47.991250 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:22:00 crc kubenswrapper[4995]: I0120 17:22:00.990226 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:22:00 crc kubenswrapper[4995]: E0120 17:22:00.991088 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:22:13 crc kubenswrapper[4995]: I0120 17:22:13.989857 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:22:13 crc kubenswrapper[4995]: E0120 17:22:13.990808 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:22:28 crc kubenswrapper[4995]: I0120 17:22:28.989446 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:22:28 crc kubenswrapper[4995]: E0120 17:22:28.990402 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:22:40 crc kubenswrapper[4995]: I0120 17:22:40.990586 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:22:40 crc kubenswrapper[4995]: E0120 17:22:40.991554 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:22:55 crc kubenswrapper[4995]: I0120 17:22:55.990209 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:22:55 crc kubenswrapper[4995]: E0120 17:22:55.991046 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:23:07 crc kubenswrapper[4995]: I0120 17:23:07.990936 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:23:07 crc kubenswrapper[4995]: E0120 17:23:07.991746 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:23:18 crc kubenswrapper[4995]: I0120 17:23:18.989275 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:23:18 crc kubenswrapper[4995]: E0120 17:23:18.991937 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:23:33 crc kubenswrapper[4995]: I0120 17:23:33.993926 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:23:33 crc kubenswrapper[4995]: E0120 17:23:33.996241 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:23:44 crc kubenswrapper[4995]: I0120 17:23:44.990826 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:23:44 crc kubenswrapper[4995]: E0120 17:23:44.992164 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:23:57 crc kubenswrapper[4995]: I0120 17:23:57.990587 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:23:57 crc kubenswrapper[4995]: E0120 17:23:57.991859 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:24:10 crc kubenswrapper[4995]: I0120 17:24:10.990474 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:24:10 crc kubenswrapper[4995]: E0120 17:24:10.991809 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:24:21 crc kubenswrapper[4995]: I0120 17:24:21.997171 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:24:21 crc kubenswrapper[4995]: E0120 17:24:21.998004 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:24:33 crc kubenswrapper[4995]: I0120 17:24:33.990015 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:24:33 crc kubenswrapper[4995]: E0120 17:24:33.990935 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.704432 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qx2md"]
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.709283 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.717677 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qx2md"]
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.803884 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-utilities\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.804126 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-catalog-content\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.804222 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvfrk\" (UniqueName: \"kubernetes.io/projected/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-kube-api-access-bvfrk\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.906463 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-utilities\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.906572 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-catalog-content\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.906606 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvfrk\" (UniqueName: \"kubernetes.io/projected/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-kube-api-access-bvfrk\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.906923 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-utilities\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.907059 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-catalog-content\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.924678 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvfrk\" (UniqueName: \"kubernetes.io/projected/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-kube-api-access-bvfrk\") pod \"certified-operators-qx2md\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") " pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:44 crc kubenswrapper[4995]: I0120 17:24:44.989617 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:24:44 crc kubenswrapper[4995]: E0120 17:24:44.989901 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:24:45 crc kubenswrapper[4995]: I0120 17:24:45.041363 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:45 crc kubenswrapper[4995]: I0120 17:24:45.531115 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qx2md"]
Jan 20 17:24:46 crc kubenswrapper[4995]: I0120 17:24:46.010101 4995 generic.go:334] "Generic (PLEG): container finished" podID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerID="640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c" exitCode=0
Jan 20 17:24:46 crc kubenswrapper[4995]: I0120 17:24:46.010220 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerDied","Data":"640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c"}
Jan 20 17:24:46 crc kubenswrapper[4995]: I0120 17:24:46.010520 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerStarted","Data":"a3c24167f50ab79753e28feb118756d753c1593186f92ade8aaefa8640338f0f"}
Jan 20 17:24:46 crc kubenswrapper[4995]: I0120 17:24:46.012240 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.021666 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerStarted","Data":"97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2"}
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.899031 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7sgz6"]
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.901067 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.914724 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sgz6"]
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.965909 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-catalog-content\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.965992 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-utilities\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:47 crc kubenswrapper[4995]: I0120 17:24:47.966206 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc8tk\" (UniqueName: \"kubernetes.io/projected/f68ae6c2-420f-4316-a393-83ddb3f94398-kube-api-access-pc8tk\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.068627 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-catalog-content\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.068760 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-utilities\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.068835 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc8tk\" (UniqueName: \"kubernetes.io/projected/f68ae6c2-420f-4316-a393-83ddb3f94398-kube-api-access-pc8tk\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.069268 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-utilities\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.069266 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-catalog-content\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.092068 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc8tk\" (UniqueName: \"kubernetes.io/projected/f68ae6c2-420f-4316-a393-83ddb3f94398-kube-api-access-pc8tk\") pod \"redhat-marketplace-7sgz6\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") " pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.226708 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:48 crc kubenswrapper[4995]: I0120 17:24:48.740543 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sgz6"]
Jan 20 17:24:48 crc kubenswrapper[4995]: W0120 17:24:48.747394 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf68ae6c2_420f_4316_a393_83ddb3f94398.slice/crio-547c414e4a179471e5c381634f86ef8495e63cf9a35d7effff59e1ac5fea4db9 WatchSource:0}: Error finding container 547c414e4a179471e5c381634f86ef8495e63cf9a35d7effff59e1ac5fea4db9: Status 404 returned error can't find the container with id 547c414e4a179471e5c381634f86ef8495e63cf9a35d7effff59e1ac5fea4db9
Jan 20 17:24:49 crc kubenswrapper[4995]: I0120 17:24:49.040707 4995 generic.go:334] "Generic (PLEG): container finished" podID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerID="97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2" exitCode=0
Jan 20 17:24:49 crc kubenswrapper[4995]: I0120 17:24:49.040769 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerDied","Data":"97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2"}
Jan 20 17:24:49 crc kubenswrapper[4995]: I0120 17:24:49.044116 4995 generic.go:334] "Generic (PLEG): container finished" podID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerID="f974f166228f9d65a535ea6351b1353aaa12af1700d3329fe1a7fe39ef8702ae" exitCode=0
Jan 20 17:24:49 crc kubenswrapper[4995]: I0120 17:24:49.044145 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerDied","Data":"f974f166228f9d65a535ea6351b1353aaa12af1700d3329fe1a7fe39ef8702ae"}
Jan 20 17:24:49 crc kubenswrapper[4995]: I0120 17:24:49.044169 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerStarted","Data":"547c414e4a179471e5c381634f86ef8495e63cf9a35d7effff59e1ac5fea4db9"}
Jan 20 17:24:50 crc kubenswrapper[4995]: I0120 17:24:50.058635 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerStarted","Data":"ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f"}
Jan 20 17:24:50 crc kubenswrapper[4995]: I0120 17:24:50.061107 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerStarted","Data":"a18050ed5784ec23604d4cb5e90f16fcac834c0547165bd793ad5e89b9c75cf3"}
Jan 20 17:24:50 crc kubenswrapper[4995]: I0120 17:24:50.088295 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qx2md" podStartSLOduration=2.540252619 podStartE2EDuration="6.088278702s" podCreationTimestamp="2026-01-20 17:24:44 +0000 UTC" firstStartedPulling="2026-01-20 17:24:46.012029587 +0000 UTC m=+3204.256634393" lastFinishedPulling="2026-01-20 17:24:49.56005567 +0000 UTC m=+3207.804660476" observedRunningTime="2026-01-20 17:24:50.078503398 +0000 UTC m=+3208.323108214" watchObservedRunningTime="2026-01-20 17:24:50.088278702 +0000 UTC m=+3208.332883508"
Jan 20 17:24:51 crc kubenswrapper[4995]: I0120 17:24:51.071356 4995 generic.go:334] "Generic (PLEG): container finished" podID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerID="a18050ed5784ec23604d4cb5e90f16fcac834c0547165bd793ad5e89b9c75cf3" exitCode=0
Jan 20 17:24:51 crc kubenswrapper[4995]: I0120 17:24:51.071423 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerDied","Data":"a18050ed5784ec23604d4cb5e90f16fcac834c0547165bd793ad5e89b9c75cf3"}
Jan 20 17:24:52 crc kubenswrapper[4995]: I0120 17:24:52.082602 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerStarted","Data":"2e207f5ab2c737c7a713acf086120538b3621008cece639c0f35509221de04e6"}
Jan 20 17:24:52 crc kubenswrapper[4995]: I0120 17:24:52.117216 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7sgz6" podStartSLOduration=2.610536108 podStartE2EDuration="5.117194002s" podCreationTimestamp="2026-01-20 17:24:47 +0000 UTC" firstStartedPulling="2026-01-20 17:24:49.046545585 +0000 UTC m=+3207.291150401" lastFinishedPulling="2026-01-20 17:24:51.553203489 +0000 UTC m=+3209.797808295" observedRunningTime="2026-01-20 17:24:52.10164649 +0000 UTC m=+3210.346251296" watchObservedRunningTime="2026-01-20 17:24:52.117194002 +0000 UTC m=+3210.361798818"
Jan 20 17:24:55 crc kubenswrapper[4995]: I0120 17:24:55.041952 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:55 crc kubenswrapper[4995]: I0120 17:24:55.042311 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:55 crc kubenswrapper[4995]: I0120 17:24:55.092726 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:55 crc kubenswrapper[4995]: I0120 17:24:55.164449 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:55 crc kubenswrapper[4995]: I0120 17:24:55.495309 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qx2md"]
Jan 20 17:24:55 crc kubenswrapper[4995]: I0120 17:24:55.989357 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:24:55 crc kubenswrapper[4995]: E0120 17:24:55.989715 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
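The PLEG entries above ("SyncLoop (PLEG): event for pod" ... event={...}) give enough structure to reconstruct each container's lifecycle offline: both marketplace pods run extract-utilities and extract-content to completion before registry-server starts. A small Go reader keyed to exactly this quoting, an illustration rather than any supported kubelet interface:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches the PLEG event entries in this log, e.g.
//   ... "SyncLoop (PLEG): event for pod" pod="ns/name" event={"ID":"...","Type":"ContainerStarted","Data":"..."}
var plegRe = regexp.MustCompile(
	`event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet log lines can be long
	for sc.Scan() {
		if m := plegRe.FindStringSubmatch(sc.Text()); m != nil {
			pod, typ, id := m[1], m[3], m[4]
			fmt.Printf("%-50s %-16s %.12s\n", pod, typ, id)
		}
	}
}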
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.139410 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qx2md" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server" containerID="cri-o://ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f" gracePeriod=2
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.646415 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.768860 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvfrk\" (UniqueName: \"kubernetes.io/projected/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-kube-api-access-bvfrk\") pod \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") "
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.769029 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-catalog-content\") pod \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") "
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.769069 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-utilities\") pod \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\" (UID: \"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac\") "
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.770091 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-utilities" (OuterVolumeSpecName: "utilities") pod "ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" (UID: "ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.774638 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-kube-api-access-bvfrk" (OuterVolumeSpecName: "kube-api-access-bvfrk") pod "ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" (UID: "ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac"). InnerVolumeSpecName "kube-api-access-bvfrk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.822569 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" (UID: "ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.871345 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.871602 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 17:24:57 crc kubenswrapper[4995]: I0120 17:24:57.871613 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvfrk\" (UniqueName: \"kubernetes.io/projected/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac-kube-api-access-bvfrk\") on node \"crc\" DevicePath \"\""
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.150219 4995 generic.go:334] "Generic (PLEG): container finished" podID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerID="ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f" exitCode=0
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.150273 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerDied","Data":"ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f"}
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.150335 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qx2md" event={"ID":"ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac","Type":"ContainerDied","Data":"a3c24167f50ab79753e28feb118756d753c1593186f92ade8aaefa8640338f0f"}
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.150357 4995 scope.go:117] "RemoveContainer" containerID="ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.150435 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qx2md"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.173157 4995 scope.go:117] "RemoveContainer" containerID="97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.188791 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qx2md"]
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.201368 4995 scope.go:117] "RemoveContainer" containerID="640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.207421 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qx2md"]
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.232128 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.232258 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.252609 4995 scope.go:117] "RemoveContainer" containerID="ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f"
Jan 20 17:24:58 crc kubenswrapper[4995]: E0120 17:24:58.253171 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f\": container with ID starting with ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f not found: ID does not exist" containerID="ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.253266 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f"} err="failed to get container status \"ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f\": rpc error: code = NotFound desc = could not find container \"ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f\": container with ID starting with ef54bf22735b64fcca9751d8c594e8820b42742fa80fd1e7d76c78f5d80e169f not found: ID does not exist"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.253343 4995 scope.go:117] "RemoveContainer" containerID="97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2"
Jan 20 17:24:58 crc kubenswrapper[4995]: E0120 17:24:58.253692 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2\": container with ID starting with 97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2 not found: ID does not exist" containerID="97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.253734 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2"} err="failed to get container status \"97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2\": rpc error: code = NotFound desc = could not find container \"97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2\": container with ID starting with 97753aab58ab71a5f56a08cf61068398622bc1e9906a3fc9462307d81b3c48a2 not found: ID does not exist"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.253766 4995 scope.go:117] "RemoveContainer" containerID="640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c"
Jan 20 17:24:58 crc kubenswrapper[4995]: E0120 17:24:58.254036 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c\": container with ID starting with 640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c not found: ID does not exist" containerID="640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.254096 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c"} err="failed to get container status \"640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c\": rpc error: code = NotFound desc = could not find container \"640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c\": container with ID starting with 640da870f69057c1fb3e069fab1be692b622462248127397d0d37af649fc3f4c not found: ID does not exist"
Jan 20 17:24:58 crc kubenswrapper[4995]: I0120 17:24:58.287982 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:24:59 crc kubenswrapper[4995]: I0120 17:24:59.215794 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:25:00 crc kubenswrapper[4995]: I0120 17:25:00.001856 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" path="/var/lib/kubelet/pods/ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac/volumes"
Jan 20 17:25:00 crc kubenswrapper[4995]: I0120 17:25:00.691381 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sgz6"]
Jan 20 17:25:02 crc kubenswrapper[4995]: I0120 17:25:02.193490 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7sgz6" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server" containerID="cri-o://2e207f5ab2c737c7a713acf086120538b3621008cece639c0f35509221de04e6" gracePeriod=2
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.204719 4995 generic.go:334] "Generic (PLEG): container finished" podID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerID="2e207f5ab2c737c7a713acf086120538b3621008cece639c0f35509221de04e6" exitCode=0
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.204761 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerDied","Data":"2e207f5ab2c737c7a713acf086120538b3621008cece639c0f35509221de04e6"}
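The RemoveContainer failures above are benign: by the time pod_container_deletor asks the runtime for the container's status, CRI-O has already deleted it and answers gRPC NotFound, which the kubelet logs and then moves past. The usual pattern is to treat NotFound as success so deletion stays idempotent; a sketch of that pattern with the same gRPC status codes, not the kubelet's actual code:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer drives some runtime delete call, treating gRPC NotFound
// as success: the desired end state (container gone) already holds, which
// is exactly the situation the entries above record.
func removeContainer(remove func(id string) error, id string) error {
	if err := remove(id); err != nil && status.Code(err) != codes.NotFound {
		return fmt.Errorf("removing container %s: %w", id, err)
	}
	return nil
}

func main() {
	notFound := status.Error(codes.NotFound, "could not find container")
	err := removeContainer(func(string) error { return notFound }, "ef54bf22")
	fmt.Println(err) // <nil>: the NotFound is absorbed as an idempotent no-op
}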
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.832187 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.979993 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-utilities\") pod \"f68ae6c2-420f-4316-a393-83ddb3f94398\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") "
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.980044 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-catalog-content\") pod \"f68ae6c2-420f-4316-a393-83ddb3f94398\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") "
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.980098 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc8tk\" (UniqueName: \"kubernetes.io/projected/f68ae6c2-420f-4316-a393-83ddb3f94398-kube-api-access-pc8tk\") pod \"f68ae6c2-420f-4316-a393-83ddb3f94398\" (UID: \"f68ae6c2-420f-4316-a393-83ddb3f94398\") "
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.981096 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-utilities" (OuterVolumeSpecName: "utilities") pod "f68ae6c2-420f-4316-a393-83ddb3f94398" (UID: "f68ae6c2-420f-4316-a393-83ddb3f94398"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:25:03 crc kubenswrapper[4995]: I0120 17:25:03.994276 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f68ae6c2-420f-4316-a393-83ddb3f94398-kube-api-access-pc8tk" (OuterVolumeSpecName: "kube-api-access-pc8tk") pod "f68ae6c2-420f-4316-a393-83ddb3f94398" (UID: "f68ae6c2-420f-4316-a393-83ddb3f94398"). InnerVolumeSpecName "kube-api-access-pc8tk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.001535 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f68ae6c2-420f-4316-a393-83ddb3f94398" (UID: "f68ae6c2-420f-4316-a393-83ddb3f94398"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.082607 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.082643 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68ae6c2-420f-4316-a393-83ddb3f94398-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.082654 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc8tk\" (UniqueName: \"kubernetes.io/projected/f68ae6c2-420f-4316-a393-83ddb3f94398-kube-api-access-pc8tk\") on node \"crc\" DevicePath \"\""
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.215587 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sgz6" event={"ID":"f68ae6c2-420f-4316-a393-83ddb3f94398","Type":"ContainerDied","Data":"547c414e4a179471e5c381634f86ef8495e63cf9a35d7effff59e1ac5fea4db9"}
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.215638 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sgz6"
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.215651 4995 scope.go:117] "RemoveContainer" containerID="2e207f5ab2c737c7a713acf086120538b3621008cece639c0f35509221de04e6"
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.247545 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sgz6"]
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.250679 4995 scope.go:117] "RemoveContainer" containerID="a18050ed5784ec23604d4cb5e90f16fcac834c0547165bd793ad5e89b9c75cf3"
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.258144 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sgz6"]
Jan 20 17:25:04 crc kubenswrapper[4995]: I0120 17:25:04.271316 4995 scope.go:117] "RemoveContainer" containerID="f974f166228f9d65a535ea6351b1353aaa12af1700d3329fe1a7fe39ef8702ae"
Jan 20 17:25:05 crc kubenswrapper[4995]: I0120 17:25:05.999363 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" path="/var/lib/kubelet/pods/f68ae6c2-420f-4316-a393-83ddb3f94398/volumes"
Jan 20 17:25:06 crc kubenswrapper[4995]: I0120 17:25:06.989881 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
Jan 20 17:25:07 crc kubenswrapper[4995]: I0120 17:25:07.245655 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"02400bc0fe91a996fa273f3cc04c8ce1fbf178cab39d7cb7347fd63dff277b28"}
Jan 20 17:27:30 crc kubenswrapper[4995]: I0120 17:27:30.572391 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:27:30 crc kubenswrapper[4995]: I0120 17:27:30.573264 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:28:00 crc kubenswrapper[4995]: I0120 17:28:00.572023 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:28:00 crc kubenswrapper[4995]: I0120 17:28:00.572638 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.571387 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.572963 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.573072 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.573931 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"02400bc0fe91a996fa273f3cc04c8ce1fbf178cab39d7cb7347fd63dff277b28"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.574088 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://02400bc0fe91a996fa273f3cc04c8ce1fbf178cab39d7cb7347fd63dff277b28" gracePeriod=600
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.768896 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="02400bc0fe91a996fa273f3cc04c8ce1fbf178cab39d7cb7347fd63dff277b28" exitCode=0
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.768967 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"02400bc0fe91a996fa273f3cc04c8ce1fbf178cab39d7cb7347fd63dff277b28"}
Jan 20 17:28:30 crc kubenswrapper[4995]: I0120 17:28:30.769548 4995 scope.go:117] "RemoveContainer" containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06"
containerID="8808d1848220fbebea683c50bab78662c8a48fc24ca6ab4ddfaae4e134909f06" Jan 20 17:28:31 crc kubenswrapper[4995]: I0120 17:28:31.785314 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"} Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.142967 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"] Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144109 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-content" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144126 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-content" Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144155 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-utilities" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144164 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-utilities" Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144180 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-content" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144189 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-content" Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144208 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-utilities" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144215 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-utilities" Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144236 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144243 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server" Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144259 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144266 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144523 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144540 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.145403 4995 util.go:30] "No sandbox for pod can be found. 
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.142967 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"]
Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144109 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-content"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144126 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-content"
Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144155 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-utilities"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144164 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-utilities"
Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144180 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-content"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144189 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="extract-content"
Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144208 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-utilities"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144215 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="extract-utilities"
Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144236 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144243 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server"
Jan 20 17:30:00 crc kubenswrapper[4995]: E0120 17:30:00.144259 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144266 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144523 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f68ae6c2-420f-4316-a393-83ddb3f94398" containerName="registry-server"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.144540 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce10c612-7fd8-4aaa-be0d-9b5ea79c0aac" containerName="registry-server"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.145403 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.147481 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.147580 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.155339 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"]
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.239293 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/820a1e7e-7454-46fe-8b07-3d9074b53c22-config-volume\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.239665 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/820a1e7e-7454-46fe-8b07-3d9074b53c22-secret-volume\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.239793 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2l9f\" (UniqueName: \"kubernetes.io/projected/820a1e7e-7454-46fe-8b07-3d9074b53c22-kube-api-access-q2l9f\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.343168 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/820a1e7e-7454-46fe-8b07-3d9074b53c22-secret-volume\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.343275 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2l9f\" (UniqueName: \"kubernetes.io/projected/820a1e7e-7454-46fe-8b07-3d9074b53c22-kube-api-access-q2l9f\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.343325 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/820a1e7e-7454-46fe-8b07-3d9074b53c22-config-volume\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"
Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.344587 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/820a1e7e-7454-46fe-8b07-3d9074b53c22-config-volume\") pod
\"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.352318 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/820a1e7e-7454-46fe-8b07-3d9074b53c22-secret-volume\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.363299 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2l9f\" (UniqueName: \"kubernetes.io/projected/820a1e7e-7454-46fe-8b07-3d9074b53c22-kube-api-access-q2l9f\") pod \"collect-profiles-29482170-5wfqn\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" Jan 20 17:30:00 crc kubenswrapper[4995]: I0120 17:30:00.473480 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" Jan 20 17:30:01 crc kubenswrapper[4995]: I0120 17:30:01.059104 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"] Jan 20 17:30:01 crc kubenswrapper[4995]: I0120 17:30:01.891464 4995 generic.go:334] "Generic (PLEG): container finished" podID="820a1e7e-7454-46fe-8b07-3d9074b53c22" containerID="ad66521b9ded8aab52f28247fc2be0d95c8ba864b4aaad893a4ea860e26ad68c" exitCode=0 Jan 20 17:30:01 crc kubenswrapper[4995]: I0120 17:30:01.891523 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" event={"ID":"820a1e7e-7454-46fe-8b07-3d9074b53c22","Type":"ContainerDied","Data":"ad66521b9ded8aab52f28247fc2be0d95c8ba864b4aaad893a4ea860e26ad68c"} Jan 20 17:30:01 crc kubenswrapper[4995]: I0120 17:30:01.891877 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" event={"ID":"820a1e7e-7454-46fe-8b07-3d9074b53c22","Type":"ContainerStarted","Data":"be5e230d1f8f6b318e91644eb96ff1c0e7b658e8821d5654e39cebdd6318ba6d"} Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.295200 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.406298 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/820a1e7e-7454-46fe-8b07-3d9074b53c22-secret-volume\") pod \"820a1e7e-7454-46fe-8b07-3d9074b53c22\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.406457 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2l9f\" (UniqueName: \"kubernetes.io/projected/820a1e7e-7454-46fe-8b07-3d9074b53c22-kube-api-access-q2l9f\") pod \"820a1e7e-7454-46fe-8b07-3d9074b53c22\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.406589 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/820a1e7e-7454-46fe-8b07-3d9074b53c22-config-volume\") pod \"820a1e7e-7454-46fe-8b07-3d9074b53c22\" (UID: \"820a1e7e-7454-46fe-8b07-3d9074b53c22\") " Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.407389 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/820a1e7e-7454-46fe-8b07-3d9074b53c22-config-volume" (OuterVolumeSpecName: "config-volume") pod "820a1e7e-7454-46fe-8b07-3d9074b53c22" (UID: "820a1e7e-7454-46fe-8b07-3d9074b53c22"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.413292 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/820a1e7e-7454-46fe-8b07-3d9074b53c22-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "820a1e7e-7454-46fe-8b07-3d9074b53c22" (UID: "820a1e7e-7454-46fe-8b07-3d9074b53c22"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.414188 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/820a1e7e-7454-46fe-8b07-3d9074b53c22-kube-api-access-q2l9f" (OuterVolumeSpecName: "kube-api-access-q2l9f") pod "820a1e7e-7454-46fe-8b07-3d9074b53c22" (UID: "820a1e7e-7454-46fe-8b07-3d9074b53c22"). InnerVolumeSpecName "kube-api-access-q2l9f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.509062 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2l9f\" (UniqueName: \"kubernetes.io/projected/820a1e7e-7454-46fe-8b07-3d9074b53c22-kube-api-access-q2l9f\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.509104 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/820a1e7e-7454-46fe-8b07-3d9074b53c22-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.509115 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/820a1e7e-7454-46fe-8b07-3d9074b53c22-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.915104 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" event={"ID":"820a1e7e-7454-46fe-8b07-3d9074b53c22","Type":"ContainerDied","Data":"be5e230d1f8f6b318e91644eb96ff1c0e7b658e8821d5654e39cebdd6318ba6d"} Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.915389 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be5e230d1f8f6b318e91644eb96ff1c0e7b658e8821d5654e39cebdd6318ba6d" Jan 20 17:30:03 crc kubenswrapper[4995]: I0120 17:30:03.915141 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn" Jan 20 17:30:04 crc kubenswrapper[4995]: I0120 17:30:04.721846 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7"] Jan 20 17:30:04 crc kubenswrapper[4995]: I0120 17:30:04.734832 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482125-5cbs7"] Jan 20 17:30:06 crc kubenswrapper[4995]: I0120 17:30:06.003710 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca405ab9-f0ea-489f-bcec-8e6e686e66af" path="/var/lib/kubelet/pods/ca405ab9-f0ea-489f-bcec-8e6e686e66af/volumes" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.700560 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t7v88"] Jan 20 17:30:26 crc kubenswrapper[4995]: E0120 17:30:26.701812 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="820a1e7e-7454-46fe-8b07-3d9074b53c22" containerName="collect-profiles" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.701829 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="820a1e7e-7454-46fe-8b07-3d9074b53c22" containerName="collect-profiles" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.702228 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="820a1e7e-7454-46fe-8b07-3d9074b53c22" containerName="collect-profiles" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.705508 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.720334 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t7v88"] Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.887403 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-utilities\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.887541 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-catalog-content\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.887600 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rtcp\" (UniqueName: \"kubernetes.io/projected/2b18701b-67b6-48ae-aa37-d13976a66207-kube-api-access-6rtcp\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.989997 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-catalog-content\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.990558 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-catalog-content\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.990704 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rtcp\" (UniqueName: \"kubernetes.io/projected/2b18701b-67b6-48ae-aa37-d13976a66207-kube-api-access-6rtcp\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.990990 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-utilities\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:26 crc kubenswrapper[4995]: I0120 17:30:26.991399 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-utilities\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:27 crc kubenswrapper[4995]: I0120 17:30:27.015876 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6rtcp\" (UniqueName: \"kubernetes.io/projected/2b18701b-67b6-48ae-aa37-d13976a66207-kube-api-access-6rtcp\") pod \"redhat-operators-t7v88\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:27 crc kubenswrapper[4995]: I0120 17:30:27.027755 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:27 crc kubenswrapper[4995]: I0120 17:30:27.498649 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t7v88"] Jan 20 17:30:28 crc kubenswrapper[4995]: I0120 17:30:28.159649 4995 generic.go:334] "Generic (PLEG): container finished" podID="2b18701b-67b6-48ae-aa37-d13976a66207" containerID="44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10" exitCode=0 Jan 20 17:30:28 crc kubenswrapper[4995]: I0120 17:30:28.159837 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerDied","Data":"44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10"} Jan 20 17:30:28 crc kubenswrapper[4995]: I0120 17:30:28.159938 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerStarted","Data":"bde3a09afbd53a088dee4a069141ac57bc12375016bacd84db3a08afebf302b0"} Jan 20 17:30:28 crc kubenswrapper[4995]: I0120 17:30:28.162011 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 17:30:29 crc kubenswrapper[4995]: I0120 17:30:29.170959 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerStarted","Data":"95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a"} Jan 20 17:30:29 crc kubenswrapper[4995]: I0120 17:30:29.896593 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s6jl8"] Jan 20 17:30:29 crc kubenswrapper[4995]: I0120 17:30:29.898397 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:29 crc kubenswrapper[4995]: I0120 17:30:29.918624 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s6jl8"] Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.075786 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k55xv\" (UniqueName: \"kubernetes.io/projected/65d90b1f-153b-4f40-8f43-cf949cd067f3-kube-api-access-k55xv\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.076174 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-utilities\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.076290 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-catalog-content\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.177868 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k55xv\" (UniqueName: \"kubernetes.io/projected/65d90b1f-153b-4f40-8f43-cf949cd067f3-kube-api-access-k55xv\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.177972 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-utilities\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.178119 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-catalog-content\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.180586 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-utilities\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.180599 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-catalog-content\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.201216 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-k55xv\" (UniqueName: \"kubernetes.io/projected/65d90b1f-153b-4f40-8f43-cf949cd067f3-kube-api-access-k55xv\") pod \"community-operators-s6jl8\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") " pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.217838 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.571666 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.571947 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:30:30 crc kubenswrapper[4995]: I0120 17:30:30.801309 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s6jl8"] Jan 20 17:30:30 crc kubenswrapper[4995]: W0120 17:30:30.813686 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65d90b1f_153b_4f40_8f43_cf949cd067f3.slice/crio-6d1f202fde900eae74eaef12bf8e326e63921e75ef9c6137b95dde9d306409ba WatchSource:0}: Error finding container 6d1f202fde900eae74eaef12bf8e326e63921e75ef9c6137b95dde9d306409ba: Status 404 returned error can't find the container with id 6d1f202fde900eae74eaef12bf8e326e63921e75ef9c6137b95dde9d306409ba Jan 20 17:30:31 crc kubenswrapper[4995]: I0120 17:30:31.189799 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerStarted","Data":"7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338"} Jan 20 17:30:31 crc kubenswrapper[4995]: I0120 17:30:31.190015 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerStarted","Data":"6d1f202fde900eae74eaef12bf8e326e63921e75ef9c6137b95dde9d306409ba"} Jan 20 17:30:31 crc kubenswrapper[4995]: E0120 17:30:31.620526 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b18701b_67b6_48ae_aa37_d13976a66207.slice/crio-conmon-95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a.scope\": RecentStats: unable to find data in memory cache]" Jan 20 17:30:32 crc kubenswrapper[4995]: I0120 17:30:32.200398 4995 generic.go:334] "Generic (PLEG): container finished" podID="2b18701b-67b6-48ae-aa37-d13976a66207" containerID="95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a" exitCode=0 Jan 20 17:30:32 crc kubenswrapper[4995]: I0120 17:30:32.200585 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" 
event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerDied","Data":"95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a"} Jan 20 17:30:33 crc kubenswrapper[4995]: I0120 17:30:33.230064 4995 generic.go:334] "Generic (PLEG): container finished" podID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerID="7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338" exitCode=0 Jan 20 17:30:33 crc kubenswrapper[4995]: I0120 17:30:33.230118 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerDied","Data":"7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338"} Jan 20 17:30:33 crc kubenswrapper[4995]: I0120 17:30:33.233199 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerStarted","Data":"6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c"} Jan 20 17:30:33 crc kubenswrapper[4995]: I0120 17:30:33.277431 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t7v88" podStartSLOduration=2.743118153 podStartE2EDuration="7.277412775s" podCreationTimestamp="2026-01-20 17:30:26 +0000 UTC" firstStartedPulling="2026-01-20 17:30:28.161706821 +0000 UTC m=+3546.406311627" lastFinishedPulling="2026-01-20 17:30:32.696001443 +0000 UTC m=+3550.940606249" observedRunningTime="2026-01-20 17:30:33.272998695 +0000 UTC m=+3551.517603501" watchObservedRunningTime="2026-01-20 17:30:33.277412775 +0000 UTC m=+3551.522017581" Jan 20 17:30:34 crc kubenswrapper[4995]: I0120 17:30:34.440520 4995 scope.go:117] "RemoveContainer" containerID="ae7c442644900fbdc71848c90b976e434c4defb574ecc6013f95083d82d434c4" Jan 20 17:30:35 crc kubenswrapper[4995]: I0120 17:30:35.252625 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerStarted","Data":"371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3"} Jan 20 17:30:36 crc kubenswrapper[4995]: I0120 17:30:36.263335 4995 generic.go:334] "Generic (PLEG): container finished" podID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerID="371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3" exitCode=0 Jan 20 17:30:36 crc kubenswrapper[4995]: I0120 17:30:36.263377 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerDied","Data":"371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3"} Jan 20 17:30:37 crc kubenswrapper[4995]: I0120 17:30:37.028816 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:37 crc kubenswrapper[4995]: I0120 17:30:37.029124 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:37 crc kubenswrapper[4995]: I0120 17:30:37.281262 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerStarted","Data":"e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978"} Jan 20 17:30:37 crc kubenswrapper[4995]: I0120 17:30:37.311945 4995 
Jan 20 17:30:37 crc kubenswrapper[4995]: I0120 17:30:37.311945 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s6jl8" podStartSLOduration=4.761178578 podStartE2EDuration="8.311916069s" podCreationTimestamp="2026-01-20 17:30:29 +0000 UTC" firstStartedPulling="2026-01-20 17:30:33.232215659 +0000 UTC m=+3551.476820465" lastFinishedPulling="2026-01-20 17:30:36.78295315 +0000 UTC m=+3555.027557956" observedRunningTime="2026-01-20 17:30:37.299230665 +0000 UTC m=+3555.543835511" watchObservedRunningTime="2026-01-20 17:30:37.311916069 +0000 UTC m=+3555.556520915"
Jan 20 17:30:38 crc kubenswrapper[4995]: I0120 17:30:38.086156 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t7v88" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="registry-server" probeResult="failure" output=<
Jan 20 17:30:38 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s
Jan 20 17:30:38 crc kubenswrapper[4995]: >
Jan 20 17:30:40 crc kubenswrapper[4995]: I0120 17:30:40.218621 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s6jl8"
Jan 20 17:30:40 crc kubenswrapper[4995]: I0120 17:30:40.220328 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s6jl8"
Jan 20 17:30:40 crc kubenswrapper[4995]: I0120 17:30:40.288399 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s6jl8"
Jan 20 17:30:41 crc kubenswrapper[4995]: I0120 17:30:41.380955 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s6jl8"
Jan 20 17:30:41 crc kubenswrapper[4995]: I0120 17:30:41.427103 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s6jl8"]
Jan 20 17:30:43 crc kubenswrapper[4995]: I0120 17:30:43.339524 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s6jl8" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="registry-server" containerID="cri-o://e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978" gracePeriod=2
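The startup probe failure above reports a connect timeout against the registry-server's gRPC port within a 1s budget. As a rough stand-in, the sketch below only tests TCP reachability of :50051 with the same timeout; the real probe speaks the gRPC health protocol, and the loopback address here is an assumption.

// Rough stand-in for the registry-server startup probe failing above: it
// only checks TCP reachability of :50051 within the same 1s budget. The
// actual probe performs a gRPC health check; this sketch does not.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	addr := "127.0.0.1:50051" // the log's ":50051", assumed local
	conn, err := net.DialTimeout("tcp", addr, 1*time.Second)
	if err != nil {
		fmt.Printf("timeout: failed to connect service %q within 1s: %v\n", ":50051", err)
		return
	}
	conn.Close()
	fmt.Println("service is reachable:", addr)
}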
Jan 20 17:30:43 crc kubenswrapper[4995]: I0120 17:30:43.878733 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s6jl8"
Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.055492 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-catalog-content\") pod \"65d90b1f-153b-4f40-8f43-cf949cd067f3\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") "
Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.055546 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k55xv\" (UniqueName: \"kubernetes.io/projected/65d90b1f-153b-4f40-8f43-cf949cd067f3-kube-api-access-k55xv\") pod \"65d90b1f-153b-4f40-8f43-cf949cd067f3\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") "
Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.056044 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-utilities\") pod \"65d90b1f-153b-4f40-8f43-cf949cd067f3\" (UID: \"65d90b1f-153b-4f40-8f43-cf949cd067f3\") "
Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.056650 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-utilities" (OuterVolumeSpecName: "utilities") pod "65d90b1f-153b-4f40-8f43-cf949cd067f3" (UID: "65d90b1f-153b-4f40-8f43-cf949cd067f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.062303 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65d90b1f-153b-4f40-8f43-cf949cd067f3-kube-api-access-k55xv" (OuterVolumeSpecName: "kube-api-access-k55xv") pod "65d90b1f-153b-4f40-8f43-cf949cd067f3" (UID: "65d90b1f-153b-4f40-8f43-cf949cd067f3"). InnerVolumeSpecName "kube-api-access-k55xv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.102786 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "65d90b1f-153b-4f40-8f43-cf949cd067f3" (UID: "65d90b1f-153b-4f40-8f43-cf949cd067f3"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.158262 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.158297 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k55xv\" (UniqueName: \"kubernetes.io/projected/65d90b1f-153b-4f40-8f43-cf949cd067f3-kube-api-access-k55xv\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.158308 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d90b1f-153b-4f40-8f43-cf949cd067f3-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.351429 4995 generic.go:334] "Generic (PLEG): container finished" podID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerID="e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978" exitCode=0 Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.351475 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s6jl8" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.351489 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerDied","Data":"e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978"} Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.351536 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6jl8" event={"ID":"65d90b1f-153b-4f40-8f43-cf949cd067f3","Type":"ContainerDied","Data":"6d1f202fde900eae74eaef12bf8e326e63921e75ef9c6137b95dde9d306409ba"} Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.351587 4995 scope.go:117] "RemoveContainer" containerID="e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.379596 4995 scope.go:117] "RemoveContainer" containerID="371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.382686 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s6jl8"] Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.399468 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s6jl8"] Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.402124 4995 scope.go:117] "RemoveContainer" containerID="7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.456226 4995 scope.go:117] "RemoveContainer" containerID="e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978" Jan 20 17:30:44 crc kubenswrapper[4995]: E0120 17:30:44.456757 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978\": container with ID starting with e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978 not found: ID does not exist" containerID="e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.456800 
4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978"} err="failed to get container status \"e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978\": rpc error: code = NotFound desc = could not find container \"e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978\": container with ID starting with e60117507e385681fbd3c1dbb4d8d6bb21f80947be5e6b0731b8662eecff6978 not found: ID does not exist" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.456851 4995 scope.go:117] "RemoveContainer" containerID="371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3" Jan 20 17:30:44 crc kubenswrapper[4995]: E0120 17:30:44.457205 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3\": container with ID starting with 371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3 not found: ID does not exist" containerID="371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.457244 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3"} err="failed to get container status \"371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3\": rpc error: code = NotFound desc = could not find container \"371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3\": container with ID starting with 371cbe57385933627bb31a8e9377ac62e70fab5dfb05b1e876d383d977c4e7d3 not found: ID does not exist" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.457271 4995 scope.go:117] "RemoveContainer" containerID="7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338" Jan 20 17:30:44 crc kubenswrapper[4995]: E0120 17:30:44.457517 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338\": container with ID starting with 7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338 not found: ID does not exist" containerID="7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338" Jan 20 17:30:44 crc kubenswrapper[4995]: I0120 17:30:44.457554 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338"} err="failed to get container status \"7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338\": rpc error: code = NotFound desc = could not find container \"7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338\": container with ID starting with 7e4af598918527fdbc9c3b99ff51c762749fa749a41ddfc531a391b4c9350338 not found: ID does not exist" Jan 20 17:30:46 crc kubenswrapper[4995]: I0120 17:30:46.001592 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" path="/var/lib/kubelet/pods/65d90b1f-153b-4f40-8f43-cf949cd067f3/volumes" Jan 20 17:30:47 crc kubenswrapper[4995]: I0120 17:30:47.075898 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:47 crc kubenswrapper[4995]: I0120 17:30:47.130446 4995 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:50 crc kubenswrapper[4995]: I0120 17:30:50.690549 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t7v88"] Jan 20 17:30:50 crc kubenswrapper[4995]: I0120 17:30:50.691236 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t7v88" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="registry-server" containerID="cri-o://6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c" gracePeriod=2 Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.205737 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.288265 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-catalog-content\") pod \"2b18701b-67b6-48ae-aa37-d13976a66207\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.288567 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rtcp\" (UniqueName: \"kubernetes.io/projected/2b18701b-67b6-48ae-aa37-d13976a66207-kube-api-access-6rtcp\") pod \"2b18701b-67b6-48ae-aa37-d13976a66207\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.288620 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-utilities\") pod \"2b18701b-67b6-48ae-aa37-d13976a66207\" (UID: \"2b18701b-67b6-48ae-aa37-d13976a66207\") " Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.290769 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-utilities" (OuterVolumeSpecName: "utilities") pod "2b18701b-67b6-48ae-aa37-d13976a66207" (UID: "2b18701b-67b6-48ae-aa37-d13976a66207"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.307368 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b18701b-67b6-48ae-aa37-d13976a66207-kube-api-access-6rtcp" (OuterVolumeSpecName: "kube-api-access-6rtcp") pod "2b18701b-67b6-48ae-aa37-d13976a66207" (UID: "2b18701b-67b6-48ae-aa37-d13976a66207"). InnerVolumeSpecName "kube-api-access-6rtcp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.390734 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.390777 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rtcp\" (UniqueName: \"kubernetes.io/projected/2b18701b-67b6-48ae-aa37-d13976a66207-kube-api-access-6rtcp\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.409336 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2b18701b-67b6-48ae-aa37-d13976a66207" (UID: "2b18701b-67b6-48ae-aa37-d13976a66207"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.419678 4995 generic.go:334] "Generic (PLEG): container finished" podID="2b18701b-67b6-48ae-aa37-d13976a66207" containerID="6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c" exitCode=0 Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.419731 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerDied","Data":"6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c"} Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.419743 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7v88" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.419774 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7v88" event={"ID":"2b18701b-67b6-48ae-aa37-d13976a66207","Type":"ContainerDied","Data":"bde3a09afbd53a088dee4a069141ac57bc12375016bacd84db3a08afebf302b0"} Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.419798 4995 scope.go:117] "RemoveContainer" containerID="6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.453534 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t7v88"] Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.455644 4995 scope.go:117] "RemoveContainer" containerID="95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.462097 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t7v88"] Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.476882 4995 scope.go:117] "RemoveContainer" containerID="44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.492454 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b18701b-67b6-48ae-aa37-d13976a66207-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.537761 4995 scope.go:117] "RemoveContainer" containerID="6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c" Jan 20 17:30:51 crc kubenswrapper[4995]: E0120 17:30:51.538409 4995 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c\": container with ID starting with 6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c not found: ID does not exist" containerID="6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.538462 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c"} err="failed to get container status \"6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c\": rpc error: code = NotFound desc = could not find container \"6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c\": container with ID starting with 6fef09775bc982e88a32898df2a1eb6b81f0c5dbe63ae2a04b0f034d0439c22c not found: ID does not exist" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.538495 4995 scope.go:117] "RemoveContainer" containerID="95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a" Jan 20 17:30:51 crc kubenswrapper[4995]: E0120 17:30:51.539201 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a\": container with ID starting with 95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a not found: ID does not exist" containerID="95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.539229 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a"} err="failed to get container status \"95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a\": rpc error: code = NotFound desc = could not find container \"95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a\": container with ID starting with 95db9cf8337bc59d6698bab9368e7bf17adcd68108e9a3d7660dacc707cc819a not found: ID does not exist" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.539244 4995 scope.go:117] "RemoveContainer" containerID="44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10" Jan 20 17:30:51 crc kubenswrapper[4995]: E0120 17:30:51.539644 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10\": container with ID starting with 44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10 not found: ID does not exist" containerID="44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10" Jan 20 17:30:51 crc kubenswrapper[4995]: I0120 17:30:51.539712 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10"} err="failed to get container status \"44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10\": rpc error: code = NotFound desc = could not find container \"44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10\": container with ID starting with 44b3cf865fa66384dbacf9037a6b6e8b46d232cf5a30c9f342c24d038f4ccd10 not found: ID does not exist" Jan 20 17:30:52 crc kubenswrapper[4995]: I0120 17:30:52.014176 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" path="/var/lib/kubelet/pods/2b18701b-67b6-48ae-aa37-d13976a66207/volumes" Jan 20 17:31:00 crc kubenswrapper[4995]: I0120 17:31:00.571733 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:31:00 crc kubenswrapper[4995]: I0120 17:31:00.572506 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.571440 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.572047 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.572113 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.572910 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.572968 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" gracePeriod=600 Jan 20 17:31:30 crc kubenswrapper[4995]: E0120 17:31:30.716176 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.826677 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" exitCode=0 Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.826734 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"} Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.826772 4995 scope.go:117] "RemoveContainer" containerID="02400bc0fe91a996fa273f3cc04c8ce1fbf178cab39d7cb7347fd63dff277b28" Jan 20 17:31:30 crc kubenswrapper[4995]: I0120 17:31:30.827609 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:31:30 crc kubenswrapper[4995]: E0120 17:31:30.828009 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:31:45 crc kubenswrapper[4995]: I0120 17:31:45.994907 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:31:45 crc kubenswrapper[4995]: E0120 17:31:45.995772 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:31:57 crc kubenswrapper[4995]: I0120 17:31:57.994524 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:31:57 crc kubenswrapper[4995]: E0120 17:31:57.995439 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:32:08 crc kubenswrapper[4995]: I0120 17:32:08.991760 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:32:08 crc kubenswrapper[4995]: E0120 17:32:08.992883 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:32:19 crc kubenswrapper[4995]: I0120 17:32:19.990070 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:32:19 crc kubenswrapper[4995]: E0120 17:32:19.993436 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Jan 20 17:32:32 crc kubenswrapper[4995]: I0120 17:32:32.990336 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"
Jan 20 17:32:32 crc kubenswrapper[4995]: E0120 17:32:32.991138 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:32:45 crc kubenswrapper[4995]: I0120 17:32:45.990778 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"
Jan 20 17:32:45 crc kubenswrapper[4995]: E0120 17:32:45.992782 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:32:59 crc kubenswrapper[4995]: I0120 17:32:59.990363 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"
Jan 20 17:32:59 crc kubenswrapper[4995]: E0120 17:32:59.991131 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:33:12 crc kubenswrapper[4995]: I0120 17:33:12.990286 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"
Jan 20 17:33:12 crc kubenswrapper[4995]: E0120 17:33:12.991380 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:33:27 crc kubenswrapper[4995]: I0120 17:33:27.989509 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137"
Jan 20 17:33:27 crc kubenswrapper[4995]: E0120 17:33:27.990496 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
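Each line in this log carries two timestamps: the journald prefix (Jan 20 17:33:27 crc kubenswrapper[4995]:) and the klog header (E0120 17:33:27.990496 ... pod_workers.go:1301]). When filtering the repeated back-off errors, it helps to split those fields apart; the sketch below does so with a regular expression written for the lines in this file, not a general klog parser.

// Splits one journald-wrapped klog line from this file into its fields:
// journald timestamp, severity, klog timestamp, source location, message.
// The regexp is tailored to lines like the back-off errors above; it is
// not a general-purpose klog parser.
package main

import (
	"fmt"
	"regexp"
)

var line = `Jan 20 17:33:42 crc kubenswrapper[4995]: E0120 17:33:42.990870 4995 pod_workers.go:1301] "Error syncing pod, skipping"`

var re = regexp.MustCompile(
	`^(\w+ \d+ [\d:]+) \S+ kubenswrapper\[\d+\]: ([IWEF])\d{4} ([\d:.]+)\s+\d+ (\S+\.go:\d+)\] (.*)$`)

func main() {
	m := re.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("journald time:", m[1]) // Jan 20 17:33:42
	fmt.Println("severity:     ", m[2]) // E (error)
	fmt.Println("klog time:    ", m[3]) // 17:33:42.990870
	fmt.Println("source:       ", m[4]) // pod_workers.go:1301
	fmt.Println("message:      ", m[5])
}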
podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:33:42 crc kubenswrapper[4995]: I0120 17:33:42.990042 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:33:42 crc kubenswrapper[4995]: E0120 17:33:42.990870 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:33:53 crc kubenswrapper[4995]: I0120 17:33:53.990729 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:33:53 crc kubenswrapper[4995]: E0120 17:33:53.991712 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:34:07 crc kubenswrapper[4995]: I0120 17:34:07.990226 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:34:07 crc kubenswrapper[4995]: E0120 17:34:07.991038 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:34:18 crc kubenswrapper[4995]: I0120 17:34:18.989893 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:34:18 crc kubenswrapper[4995]: E0120 17:34:18.990694 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:34:29 crc kubenswrapper[4995]: I0120 17:34:29.989601 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:34:29 crc kubenswrapper[4995]: E0120 17:34:29.990425 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:34:42 crc kubenswrapper[4995]: I0120 17:34:42.989600 4995 scope.go:117] "RemoveContainer" 
containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:34:42 crc kubenswrapper[4995]: E0120 17:34:42.990440 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.051396 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ffmtf"] Jan 20 17:34:51 crc kubenswrapper[4995]: E0120 17:34:51.052378 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="registry-server" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052392 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="registry-server" Jan 20 17:34:51 crc kubenswrapper[4995]: E0120 17:34:51.052410 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="extract-utilities" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052416 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="extract-utilities" Jan 20 17:34:51 crc kubenswrapper[4995]: E0120 17:34:51.052434 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="extract-content" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052439 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="extract-content" Jan 20 17:34:51 crc kubenswrapper[4995]: E0120 17:34:51.052454 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="registry-server" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052460 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="registry-server" Jan 20 17:34:51 crc kubenswrapper[4995]: E0120 17:34:51.052468 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="extract-utilities" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052474 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="extract-utilities" Jan 20 17:34:51 crc kubenswrapper[4995]: E0120 17:34:51.052486 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="extract-content" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052491 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="extract-content" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052662 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d90b1f-153b-4f40-8f43-cf949cd067f3" containerName="registry-server" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.052684 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b18701b-67b6-48ae-aa37-d13976a66207" containerName="registry-server" Jan 20 17:34:51 
crc kubenswrapper[4995]: I0120 17:34:51.054027 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.118264 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ffmtf"] Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.216790 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br8vx\" (UniqueName: \"kubernetes.io/projected/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-kube-api-access-br8vx\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.216851 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-catalog-content\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.216983 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-utilities\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.318513 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-utilities\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.318640 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br8vx\" (UniqueName: \"kubernetes.io/projected/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-kube-api-access-br8vx\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.318678 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-catalog-content\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.319000 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-utilities\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.319143 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-catalog-content\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" 
Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.349307 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br8vx\" (UniqueName: \"kubernetes.io/projected/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-kube-api-access-br8vx\") pod \"certified-operators-ffmtf\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.398843 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:34:51 crc kubenswrapper[4995]: I0120 17:34:51.930052 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ffmtf"] Jan 20 17:34:52 crc kubenswrapper[4995]: I0120 17:34:52.872177 4995 generic.go:334] "Generic (PLEG): container finished" podID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerID="fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63" exitCode=0 Jan 20 17:34:52 crc kubenswrapper[4995]: I0120 17:34:52.872220 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ffmtf" event={"ID":"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274","Type":"ContainerDied","Data":"fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63"} Jan 20 17:34:52 crc kubenswrapper[4995]: I0120 17:34:52.872708 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ffmtf" event={"ID":"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274","Type":"ContainerStarted","Data":"ce3de280a8d9877d1f4a2921930dc7702a5c15849aba4f7918b3e0ea73bc2c7d"} Jan 20 17:34:54 crc kubenswrapper[4995]: I0120 17:34:54.891842 4995 generic.go:334] "Generic (PLEG): container finished" podID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerID="4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5" exitCode=0 Jan 20 17:34:54 crc kubenswrapper[4995]: I0120 17:34:54.892009 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ffmtf" event={"ID":"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274","Type":"ContainerDied","Data":"4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5"} Jan 20 17:34:55 crc kubenswrapper[4995]: I0120 17:34:55.918786 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ffmtf" event={"ID":"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274","Type":"ContainerStarted","Data":"3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417"} Jan 20 17:34:55 crc kubenswrapper[4995]: I0120 17:34:55.944213 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ffmtf" podStartSLOduration=2.4479576769999998 podStartE2EDuration="4.944187085s" podCreationTimestamp="2026-01-20 17:34:51 +0000 UTC" firstStartedPulling="2026-01-20 17:34:52.877485741 +0000 UTC m=+3811.122090547" lastFinishedPulling="2026-01-20 17:34:55.373715149 +0000 UTC m=+3813.618319955" observedRunningTime="2026-01-20 17:34:55.935602121 +0000 UTC m=+3814.180206937" watchObservedRunningTime="2026-01-20 17:34:55.944187085 +0000 UTC m=+3814.188791891" Jan 20 17:34:55 crc kubenswrapper[4995]: I0120 17:34:55.990291 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:34:55 crc kubenswrapper[4995]: E0120 17:34:55.990543 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:35:01 crc kubenswrapper[4995]: I0120 17:35:01.399859 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:35:01 crc kubenswrapper[4995]: I0120 17:35:01.400502 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:35:01 crc kubenswrapper[4995]: I0120 17:35:01.445151 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:35:02 crc kubenswrapper[4995]: I0120 17:35:02.031459 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:35:02 crc kubenswrapper[4995]: I0120 17:35:02.080186 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ffmtf"] Jan 20 17:35:03 crc kubenswrapper[4995]: I0120 17:35:03.988614 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ffmtf" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="registry-server" containerID="cri-o://3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417" gracePeriod=2 Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.469095 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.586365 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br8vx\" (UniqueName: \"kubernetes.io/projected/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-kube-api-access-br8vx\") pod \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.586436 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-utilities\") pod \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.586482 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-catalog-content\") pod \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\" (UID: \"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274\") " Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.587237 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-utilities" (OuterVolumeSpecName: "utilities") pod "3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" (UID: "3b9e7e06-ac48-4efc-98a6-b0c7a3b67274"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.593279 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-kube-api-access-br8vx" (OuterVolumeSpecName: "kube-api-access-br8vx") pod "3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" (UID: "3b9e7e06-ac48-4efc-98a6-b0c7a3b67274"). InnerVolumeSpecName "kube-api-access-br8vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.688951 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br8vx\" (UniqueName: \"kubernetes.io/projected/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-kube-api-access-br8vx\") on node \"crc\" DevicePath \"\"" Jan 20 17:35:04 crc kubenswrapper[4995]: I0120 17:35:04.688988 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.032720 4995 generic.go:334] "Generic (PLEG): container finished" podID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerID="3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417" exitCode=0 Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.032768 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ffmtf" event={"ID":"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274","Type":"ContainerDied","Data":"3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417"} Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.032804 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ffmtf" event={"ID":"3b9e7e06-ac48-4efc-98a6-b0c7a3b67274","Type":"ContainerDied","Data":"ce3de280a8d9877d1f4a2921930dc7702a5c15849aba4f7918b3e0ea73bc2c7d"} Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.032823 4995 scope.go:117] "RemoveContainer" containerID="3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.032824 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ffmtf" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.055454 4995 scope.go:117] "RemoveContainer" containerID="4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.076122 4995 scope.go:117] "RemoveContainer" containerID="fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.142001 4995 scope.go:117] "RemoveContainer" containerID="3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417" Jan 20 17:35:05 crc kubenswrapper[4995]: E0120 17:35:05.142754 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417\": container with ID starting with 3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417 not found: ID does not exist" containerID="3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.142805 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417"} err="failed to get container status \"3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417\": rpc error: code = NotFound desc = could not find container \"3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417\": container with ID starting with 3dd2c86151c8762e4fa6887c24d2c6f3451e92dcf4a90c64f24f2f97b7bc0417 not found: ID does not exist" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.142835 4995 scope.go:117] "RemoveContainer" containerID="4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5" Jan 20 17:35:05 crc kubenswrapper[4995]: E0120 17:35:05.143450 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5\": container with ID starting with 4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5 not found: ID does not exist" containerID="4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.143513 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5"} err="failed to get container status \"4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5\": rpc error: code = NotFound desc = could not find container \"4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5\": container with ID starting with 4ad0e5754435ff1dbee649535209add5c532fa0352f86c430641e6ecd5aaffe5 not found: ID does not exist" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.143549 4995 scope.go:117] "RemoveContainer" containerID="fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63" Jan 20 17:35:05 crc kubenswrapper[4995]: E0120 17:35:05.143936 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63\": container with ID starting with fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63 not found: ID does not exist" containerID="fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63" 
Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.143962 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63"} err="failed to get container status \"fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63\": rpc error: code = NotFound desc = could not find container \"fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63\": container with ID starting with fb99d8d824c3568118550ee02f494e69a69b20a0216910d3bd8e22ef88766d63 not found: ID does not exist" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.253162 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" (UID: "3b9e7e06-ac48-4efc-98a6-b0c7a3b67274"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.326452 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.372064 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ffmtf"] Jan 20 17:35:05 crc kubenswrapper[4995]: I0120 17:35:05.381004 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ffmtf"] Jan 20 17:35:06 crc kubenswrapper[4995]: I0120 17:35:06.004239 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" path="/var/lib/kubelet/pods/3b9e7e06-ac48-4efc-98a6-b0c7a3b67274/volumes" Jan 20 17:35:07 crc kubenswrapper[4995]: I0120 17:35:07.991893 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:35:07 crc kubenswrapper[4995]: E0120 17:35:07.992878 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:35:22 crc kubenswrapper[4995]: I0120 17:35:22.990048 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:35:22 crc kubenswrapper[4995]: E0120 17:35:22.990976 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:35:37 crc kubenswrapper[4995]: I0120 17:35:37.989668 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:35:37 crc kubenswrapper[4995]: E0120 17:35:37.990451 4995 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:35:50 crc kubenswrapper[4995]: I0120 17:35:50.989581 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:35:50 crc kubenswrapper[4995]: E0120 17:35:50.991213 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:36:05 crc kubenswrapper[4995]: I0120 17:36:05.990013 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:36:05 crc kubenswrapper[4995]: E0120 17:36:05.991071 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:36:17 crc kubenswrapper[4995]: I0120 17:36:17.990278 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:36:17 crc kubenswrapper[4995]: E0120 17:36:17.991422 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:36:28 crc kubenswrapper[4995]: I0120 17:36:28.989778 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:36:28 crc kubenswrapper[4995]: E0120 17:36:28.990583 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:36:43 crc kubenswrapper[4995]: I0120 17:36:43.989642 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:36:45 crc kubenswrapper[4995]: I0120 17:36:45.014136 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"774324e35d0bc3559af1e50e18189873e201a55ab4814c730585a5be26bc6dc5"} Jan 20 17:39:00 crc kubenswrapper[4995]: I0120 17:39:00.571307 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:39:00 crc kubenswrapper[4995]: I0120 17:39:00.571847 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:39:30 crc kubenswrapper[4995]: I0120 17:39:30.571759 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:39:30 crc kubenswrapper[4995]: I0120 17:39:30.572803 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.572138 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.572935 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.573007 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.574483 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"774324e35d0bc3559af1e50e18189873e201a55ab4814c730585a5be26bc6dc5"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.574605 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://774324e35d0bc3559af1e50e18189873e201a55ab4814c730585a5be26bc6dc5" gracePeriod=600 Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.943801 4995 generic.go:334] "Generic (PLEG): container finished" 
podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="774324e35d0bc3559af1e50e18189873e201a55ab4814c730585a5be26bc6dc5" exitCode=0 Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.943871 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"774324e35d0bc3559af1e50e18189873e201a55ab4814c730585a5be26bc6dc5"} Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.944189 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2"} Jan 20 17:40:00 crc kubenswrapper[4995]: I0120 17:40:00.944215 4995 scope.go:117] "RemoveContainer" containerID="56e740de00c1a68f47e87e4be74895e5a920b8c8f25ebc343929bc9eee1d4137" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.180777 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7lrj5"] Jan 20 17:40:40 crc kubenswrapper[4995]: E0120 17:40:40.181944 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="registry-server" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.181963 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="registry-server" Jan 20 17:40:40 crc kubenswrapper[4995]: E0120 17:40:40.181990 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="extract-utilities" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.181999 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="extract-utilities" Jan 20 17:40:40 crc kubenswrapper[4995]: E0120 17:40:40.182014 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="extract-content" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.182023 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="extract-content" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.182289 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b9e7e06-ac48-4efc-98a6-b0c7a3b67274" containerName="registry-server" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.184254 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.212439 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7lrj5"] Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.301612 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqn2l\" (UniqueName: \"kubernetes.io/projected/337ca5c6-f52e-4fc8-abef-dc5777b61358-kube-api-access-jqn2l\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.301784 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-utilities\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.301863 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-catalog-content\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.404506 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-catalog-content\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.404620 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqn2l\" (UniqueName: \"kubernetes.io/projected/337ca5c6-f52e-4fc8-abef-dc5777b61358-kube-api-access-jqn2l\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.404711 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-utilities\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.405118 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-utilities\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.405367 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-catalog-content\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.424128 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jqn2l\" (UniqueName: \"kubernetes.io/projected/337ca5c6-f52e-4fc8-abef-dc5777b61358-kube-api-access-jqn2l\") pod \"redhat-operators-7lrj5\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.508212 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:40 crc kubenswrapper[4995]: I0120 17:40:40.948513 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7lrj5"] Jan 20 17:40:40 crc kubenswrapper[4995]: W0120 17:40:40.953776 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod337ca5c6_f52e_4fc8_abef_dc5777b61358.slice/crio-b6ad79a0c2f256a735068e98a4fc7f8085221b9f51db03060d62b84547389f88 WatchSource:0}: Error finding container b6ad79a0c2f256a735068e98a4fc7f8085221b9f51db03060d62b84547389f88: Status 404 returned error can't find the container with id b6ad79a0c2f256a735068e98a4fc7f8085221b9f51db03060d62b84547389f88 Jan 20 17:40:41 crc kubenswrapper[4995]: I0120 17:40:41.481044 4995 generic.go:334] "Generic (PLEG): container finished" podID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerID="530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1" exitCode=0 Jan 20 17:40:41 crc kubenswrapper[4995]: I0120 17:40:41.481122 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerDied","Data":"530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1"} Jan 20 17:40:41 crc kubenswrapper[4995]: I0120 17:40:41.482369 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerStarted","Data":"b6ad79a0c2f256a735068e98a4fc7f8085221b9f51db03060d62b84547389f88"} Jan 20 17:40:41 crc kubenswrapper[4995]: I0120 17:40:41.483702 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 17:40:44 crc kubenswrapper[4995]: I0120 17:40:44.521772 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerStarted","Data":"26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a"} Jan 20 17:40:45 crc kubenswrapper[4995]: I0120 17:40:45.755298 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x7mf7"] Jan 20 17:40:45 crc kubenswrapper[4995]: I0120 17:40:45.762462 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:45 crc kubenswrapper[4995]: I0120 17:40:45.769960 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7mf7"] Jan 20 17:40:45 crc kubenswrapper[4995]: I0120 17:40:45.924991 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-utilities\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:45 crc kubenswrapper[4995]: I0120 17:40:45.925228 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-catalog-content\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:45 crc kubenswrapper[4995]: I0120 17:40:45.925296 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sznj\" (UniqueName: \"kubernetes.io/projected/55a308f3-74f2-4b58-bf20-6e7ea000625f-kube-api-access-4sznj\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.027595 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-utilities\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.027699 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-catalog-content\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.027734 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sznj\" (UniqueName: \"kubernetes.io/projected/55a308f3-74f2-4b58-bf20-6e7ea000625f-kube-api-access-4sznj\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.028058 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-utilities\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.028224 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-catalog-content\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.049931 4995 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4sznj\" (UniqueName: \"kubernetes.io/projected/55a308f3-74f2-4b58-bf20-6e7ea000625f-kube-api-access-4sznj\") pod \"redhat-marketplace-x7mf7\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.082198 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:46 crc kubenswrapper[4995]: I0120 17:40:46.604468 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7mf7"] Jan 20 17:40:46 crc kubenswrapper[4995]: W0120 17:40:46.605117 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55a308f3_74f2_4b58_bf20_6e7ea000625f.slice/crio-50b8fa1ccb0d5084e3cbcac0bbae20e06c2ae4d999c36f7d1ab0aeb1cab5bbe4 WatchSource:0}: Error finding container 50b8fa1ccb0d5084e3cbcac0bbae20e06c2ae4d999c36f7d1ab0aeb1cab5bbe4: Status 404 returned error can't find the container with id 50b8fa1ccb0d5084e3cbcac0bbae20e06c2ae4d999c36f7d1ab0aeb1cab5bbe4 Jan 20 17:40:47 crc kubenswrapper[4995]: I0120 17:40:47.556748 4995 generic.go:334] "Generic (PLEG): container finished" podID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerID="ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b" exitCode=0 Jan 20 17:40:47 crc kubenswrapper[4995]: I0120 17:40:47.556885 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerDied","Data":"ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b"} Jan 20 17:40:47 crc kubenswrapper[4995]: I0120 17:40:47.557468 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerStarted","Data":"50b8fa1ccb0d5084e3cbcac0bbae20e06c2ae4d999c36f7d1ab0aeb1cab5bbe4"} Jan 20 17:40:47 crc kubenswrapper[4995]: I0120 17:40:47.562575 4995 generic.go:334] "Generic (PLEG): container finished" podID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerID="26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a" exitCode=0 Jan 20 17:40:47 crc kubenswrapper[4995]: I0120 17:40:47.562640 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerDied","Data":"26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a"} Jan 20 17:40:48 crc kubenswrapper[4995]: I0120 17:40:48.573648 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerStarted","Data":"7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2"} Jan 20 17:40:48 crc kubenswrapper[4995]: I0120 17:40:48.575373 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerStarted","Data":"37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc"} Jan 20 17:40:48 crc kubenswrapper[4995]: I0120 17:40:48.596206 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7lrj5" podStartSLOduration=2.060699082 
podStartE2EDuration="8.596183235s" podCreationTimestamp="2026-01-20 17:40:40 +0000 UTC" firstStartedPulling="2026-01-20 17:40:41.483475802 +0000 UTC m=+4159.728080608" lastFinishedPulling="2026-01-20 17:40:48.018959955 +0000 UTC m=+4166.263564761" observedRunningTime="2026-01-20 17:40:48.596026711 +0000 UTC m=+4166.840631517" watchObservedRunningTime="2026-01-20 17:40:48.596183235 +0000 UTC m=+4166.840788061" Jan 20 17:40:48 crc kubenswrapper[4995]: I0120 17:40:48.945482 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jqzfc"] Jan 20 17:40:48 crc kubenswrapper[4995]: I0120 17:40:48.948708 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:48 crc kubenswrapper[4995]: I0120 17:40:48.966486 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jqzfc"] Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.097656 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-utilities\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.097888 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4jvk\" (UniqueName: \"kubernetes.io/projected/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-kube-api-access-z4jvk\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.097939 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-catalog-content\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.199998 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4jvk\" (UniqueName: \"kubernetes.io/projected/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-kube-api-access-z4jvk\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.200064 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-catalog-content\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.200114 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-utilities\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.200720 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-catalog-content\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.200727 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-utilities\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.217901 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4jvk\" (UniqueName: \"kubernetes.io/projected/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-kube-api-access-z4jvk\") pod \"community-operators-jqzfc\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.278318 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.589490 4995 generic.go:334] "Generic (PLEG): container finished" podID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerID="37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc" exitCode=0 Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.589531 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerDied","Data":"37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc"} Jan 20 17:40:49 crc kubenswrapper[4995]: I0120 17:40:49.841230 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jqzfc"] Jan 20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.508780 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.509141 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.602430 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerStarted","Data":"e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db"} Jan 20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.604652 4995 generic.go:334] "Generic (PLEG): container finished" podID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerID="6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889" exitCode=0 Jan 20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.604709 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerDied","Data":"6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889"} Jan 20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.604741 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerStarted","Data":"16c3cfe70219c07af62e390d414ef4af4d3c95da42da5bc67e866e609ebe7531"} Jan 
20 17:40:50 crc kubenswrapper[4995]: I0120 17:40:50.638527 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x7mf7" podStartSLOduration=3.14304205 podStartE2EDuration="5.638500156s" podCreationTimestamp="2026-01-20 17:40:45 +0000 UTC" firstStartedPulling="2026-01-20 17:40:47.560437523 +0000 UTC m=+4165.805042339" lastFinishedPulling="2026-01-20 17:40:50.055895599 +0000 UTC m=+4168.300500445" observedRunningTime="2026-01-20 17:40:50.633918072 +0000 UTC m=+4168.878522898" watchObservedRunningTime="2026-01-20 17:40:50.638500156 +0000 UTC m=+4168.883104982" Jan 20 17:40:51 crc kubenswrapper[4995]: I0120 17:40:51.558793 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7lrj5" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="registry-server" probeResult="failure" output=< Jan 20 17:40:51 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 17:40:51 crc kubenswrapper[4995]: > Jan 20 17:40:51 crc kubenswrapper[4995]: I0120 17:40:51.614386 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerStarted","Data":"4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867"} Jan 20 17:40:52 crc kubenswrapper[4995]: I0120 17:40:52.624209 4995 generic.go:334] "Generic (PLEG): container finished" podID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerID="4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867" exitCode=0 Jan 20 17:40:52 crc kubenswrapper[4995]: I0120 17:40:52.624288 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerDied","Data":"4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867"} Jan 20 17:40:53 crc kubenswrapper[4995]: I0120 17:40:53.639200 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerStarted","Data":"cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d"} Jan 20 17:40:53 crc kubenswrapper[4995]: I0120 17:40:53.659713 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jqzfc" podStartSLOduration=3.116926903 podStartE2EDuration="5.659695391s" podCreationTimestamp="2026-01-20 17:40:48 +0000 UTC" firstStartedPulling="2026-01-20 17:40:50.60671185 +0000 UTC m=+4168.851316676" lastFinishedPulling="2026-01-20 17:40:53.149480358 +0000 UTC m=+4171.394085164" observedRunningTime="2026-01-20 17:40:53.655535537 +0000 UTC m=+4171.900140343" watchObservedRunningTime="2026-01-20 17:40:53.659695391 +0000 UTC m=+4171.904300197" Jan 20 17:40:56 crc kubenswrapper[4995]: I0120 17:40:56.082761 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:56 crc kubenswrapper[4995]: I0120 17:40:56.084159 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:56 crc kubenswrapper[4995]: I0120 17:40:56.782218 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:57 crc kubenswrapper[4995]: I0120 17:40:57.766625 4995 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:40:58 crc kubenswrapper[4995]: I0120 17:40:58.338740 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7mf7"] Jan 20 17:40:59 crc kubenswrapper[4995]: I0120 17:40:59.278701 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:59 crc kubenswrapper[4995]: I0120 17:40:59.279540 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:59 crc kubenswrapper[4995]: I0120 17:40:59.339584 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:40:59 crc kubenswrapper[4995]: I0120 17:40:59.714495 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x7mf7" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="registry-server" containerID="cri-o://e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db" gracePeriod=2 Jan 20 17:40:59 crc kubenswrapper[4995]: I0120 17:40:59.771455 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.266884 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.347319 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-utilities\") pod \"55a308f3-74f2-4b58-bf20-6e7ea000625f\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.347561 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sznj\" (UniqueName: \"kubernetes.io/projected/55a308f3-74f2-4b58-bf20-6e7ea000625f-kube-api-access-4sznj\") pod \"55a308f3-74f2-4b58-bf20-6e7ea000625f\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.347643 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-catalog-content\") pod \"55a308f3-74f2-4b58-bf20-6e7ea000625f\" (UID: \"55a308f3-74f2-4b58-bf20-6e7ea000625f\") " Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.347977 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-utilities" (OuterVolumeSpecName: "utilities") pod "55a308f3-74f2-4b58-bf20-6e7ea000625f" (UID: "55a308f3-74f2-4b58-bf20-6e7ea000625f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.348319 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.353127 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55a308f3-74f2-4b58-bf20-6e7ea000625f-kube-api-access-4sznj" (OuterVolumeSpecName: "kube-api-access-4sznj") pod "55a308f3-74f2-4b58-bf20-6e7ea000625f" (UID: "55a308f3-74f2-4b58-bf20-6e7ea000625f"). InnerVolumeSpecName "kube-api-access-4sznj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.371159 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "55a308f3-74f2-4b58-bf20-6e7ea000625f" (UID: "55a308f3-74f2-4b58-bf20-6e7ea000625f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.449753 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sznj\" (UniqueName: \"kubernetes.io/projected/55a308f3-74f2-4b58-bf20-6e7ea000625f-kube-api-access-4sznj\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.449784 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a308f3-74f2-4b58-bf20-6e7ea000625f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.724672 4995 generic.go:334] "Generic (PLEG): container finished" podID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerID="e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db" exitCode=0 Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.725064 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7mf7" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.725055 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerDied","Data":"e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db"} Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.725275 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7mf7" event={"ID":"55a308f3-74f2-4b58-bf20-6e7ea000625f","Type":"ContainerDied","Data":"50b8fa1ccb0d5084e3cbcac0bbae20e06c2ae4d999c36f7d1ab0aeb1cab5bbe4"} Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.725309 4995 scope.go:117] "RemoveContainer" containerID="e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.749635 4995 scope.go:117] "RemoveContainer" containerID="37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.766399 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7mf7"] Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.775014 4995 scope.go:117] "RemoveContainer" containerID="ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.775387 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7mf7"] Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.799912 4995 scope.go:117] "RemoveContainer" containerID="e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db" Jan 20 17:41:00 crc kubenswrapper[4995]: E0120 17:41:00.800438 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db\": container with ID starting with e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db not found: ID does not exist" containerID="e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.800478 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db"} err="failed to get container status \"e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db\": rpc error: code = NotFound desc = could not find container \"e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db\": container with ID starting with e09e19ebff1fb8e657cc2afeba28fd5c53cedd3404cfd58a3d8b6de21d01e6db not found: ID does not exist" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.800504 4995 scope.go:117] "RemoveContainer" containerID="37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc" Jan 20 17:41:00 crc kubenswrapper[4995]: E0120 17:41:00.800861 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc\": container with ID starting with 37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc not found: ID does not exist" containerID="37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.800902 4995 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc"} err="failed to get container status \"37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc\": rpc error: code = NotFound desc = could not find container \"37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc\": container with ID starting with 37ad878a61b4e58f7baec98a1f943d177219837d5772222d23419f6dab8fb0dc not found: ID does not exist" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.800931 4995 scope.go:117] "RemoveContainer" containerID="ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b" Jan 20 17:41:00 crc kubenswrapper[4995]: E0120 17:41:00.801323 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b\": container with ID starting with ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b not found: ID does not exist" containerID="ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b" Jan 20 17:41:00 crc kubenswrapper[4995]: I0120 17:41:00.801351 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b"} err="failed to get container status \"ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b\": rpc error: code = NotFound desc = could not find container \"ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b\": container with ID starting with ed87271edf2fe9a12c02c9998187a86d956fcd06c32727cf3eaed50c6076717b not found: ID does not exist" Jan 20 17:41:01 crc kubenswrapper[4995]: I0120 17:41:01.580955 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7lrj5" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="registry-server" probeResult="failure" output=< Jan 20 17:41:01 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 17:41:01 crc kubenswrapper[4995]: > Jan 20 17:41:02 crc kubenswrapper[4995]: I0120 17:41:02.012666 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" path="/var/lib/kubelet/pods/55a308f3-74f2-4b58-bf20-6e7ea000625f/volumes" Jan 20 17:41:02 crc kubenswrapper[4995]: I0120 17:41:02.841433 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jqzfc"] Jan 20 17:41:02 crc kubenswrapper[4995]: I0120 17:41:02.847034 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jqzfc" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="registry-server" containerID="cri-o://cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d" gracePeriod=2 Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.396241 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.422636 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4jvk\" (UniqueName: \"kubernetes.io/projected/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-kube-api-access-z4jvk\") pod \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.422745 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-utilities\") pod \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.422939 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-catalog-content\") pod \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\" (UID: \"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3\") " Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.423466 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-utilities" (OuterVolumeSpecName: "utilities") pod "8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" (UID: "8a790de9-6a4b-4acb-9c99-5cd92ec6fff3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.431337 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-kube-api-access-z4jvk" (OuterVolumeSpecName: "kube-api-access-z4jvk") pod "8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" (UID: "8a790de9-6a4b-4acb-9c99-5cd92ec6fff3"). InnerVolumeSpecName "kube-api-access-z4jvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.472692 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" (UID: "8a790de9-6a4b-4acb-9c99-5cd92ec6fff3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.527553 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.527586 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.527595 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4jvk\" (UniqueName: \"kubernetes.io/projected/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3-kube-api-access-z4jvk\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.831056 4995 generic.go:334] "Generic (PLEG): container finished" podID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerID="cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d" exitCode=0 Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.831157 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerDied","Data":"cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d"} Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.831198 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jqzfc" event={"ID":"8a790de9-6a4b-4acb-9c99-5cd92ec6fff3","Type":"ContainerDied","Data":"16c3cfe70219c07af62e390d414ef4af4d3c95da42da5bc67e866e609ebe7531"} Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.831235 4995 scope.go:117] "RemoveContainer" containerID="cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.831473 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jqzfc" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.867504 4995 scope.go:117] "RemoveContainer" containerID="4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.886629 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jqzfc"] Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.898629 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jqzfc"] Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.899771 4995 scope.go:117] "RemoveContainer" containerID="6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.949803 4995 scope.go:117] "RemoveContainer" containerID="cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d" Jan 20 17:41:03 crc kubenswrapper[4995]: E0120 17:41:03.950275 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d\": container with ID starting with cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d not found: ID does not exist" containerID="cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.950315 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d"} err="failed to get container status \"cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d\": rpc error: code = NotFound desc = could not find container \"cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d\": container with ID starting with cb9c94f6bd2aeff5e20d0a5e2f9fe2c6796c5ac0ba5840cf98373cd74aeb5f2d not found: ID does not exist" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.950338 4995 scope.go:117] "RemoveContainer" containerID="4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867" Jan 20 17:41:03 crc kubenswrapper[4995]: E0120 17:41:03.950648 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867\": container with ID starting with 4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867 not found: ID does not exist" containerID="4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.950677 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867"} err="failed to get container status \"4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867\": rpc error: code = NotFound desc = could not find container \"4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867\": container with ID starting with 4cb988cf58d439e165dc6b882510ba6cb20f294e6e21d909b317c62d40e19867 not found: ID does not exist" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.950694 4995 scope.go:117] "RemoveContainer" containerID="6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889" Jan 20 17:41:03 crc kubenswrapper[4995]: E0120 17:41:03.950962 4995 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889\": container with ID starting with 6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889 not found: ID does not exist" containerID="6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889" Jan 20 17:41:03 crc kubenswrapper[4995]: I0120 17:41:03.950994 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889"} err="failed to get container status \"6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889\": rpc error: code = NotFound desc = could not find container \"6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889\": container with ID starting with 6eeb3ae35dbd6569a3c5013af596fc9565ce8fa0f89b5e6a019e94457b1ab889 not found: ID does not exist" Jan 20 17:41:04 crc kubenswrapper[4995]: I0120 17:41:04.001625 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" path="/var/lib/kubelet/pods/8a790de9-6a4b-4acb-9c99-5cd92ec6fff3/volumes" Jan 20 17:41:10 crc kubenswrapper[4995]: I0120 17:41:10.563192 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:41:10 crc kubenswrapper[4995]: I0120 17:41:10.619773 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:41:11 crc kubenswrapper[4995]: I0120 17:41:11.372627 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7lrj5"] Jan 20 17:41:11 crc kubenswrapper[4995]: I0120 17:41:11.916348 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7lrj5" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="registry-server" containerID="cri-o://7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2" gracePeriod=2 Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.466887 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.512554 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-utilities\") pod \"337ca5c6-f52e-4fc8-abef-dc5777b61358\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.512688 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqn2l\" (UniqueName: \"kubernetes.io/projected/337ca5c6-f52e-4fc8-abef-dc5777b61358-kube-api-access-jqn2l\") pod \"337ca5c6-f52e-4fc8-abef-dc5777b61358\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.512831 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-catalog-content\") pod \"337ca5c6-f52e-4fc8-abef-dc5777b61358\" (UID: \"337ca5c6-f52e-4fc8-abef-dc5777b61358\") " Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.513257 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-utilities" (OuterVolumeSpecName: "utilities") pod "337ca5c6-f52e-4fc8-abef-dc5777b61358" (UID: "337ca5c6-f52e-4fc8-abef-dc5777b61358"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.519497 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/337ca5c6-f52e-4fc8-abef-dc5777b61358-kube-api-access-jqn2l" (OuterVolumeSpecName: "kube-api-access-jqn2l") pod "337ca5c6-f52e-4fc8-abef-dc5777b61358" (UID: "337ca5c6-f52e-4fc8-abef-dc5777b61358"). InnerVolumeSpecName "kube-api-access-jqn2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.615737 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.615769 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqn2l\" (UniqueName: \"kubernetes.io/projected/337ca5c6-f52e-4fc8-abef-dc5777b61358-kube-api-access-jqn2l\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.649452 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "337ca5c6-f52e-4fc8-abef-dc5777b61358" (UID: "337ca5c6-f52e-4fc8-abef-dc5777b61358"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.717011 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/337ca5c6-f52e-4fc8-abef-dc5777b61358-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.931311 4995 generic.go:334] "Generic (PLEG): container finished" podID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerID="7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2" exitCode=0 Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.931377 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerDied","Data":"7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2"} Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.931479 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7lrj5" event={"ID":"337ca5c6-f52e-4fc8-abef-dc5777b61358","Type":"ContainerDied","Data":"b6ad79a0c2f256a735068e98a4fc7f8085221b9f51db03060d62b84547389f88"} Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.931503 4995 scope.go:117] "RemoveContainer" containerID="7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.931523 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7lrj5" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.954541 4995 scope.go:117] "RemoveContainer" containerID="26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a" Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.987831 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7lrj5"] Jan 20 17:41:12 crc kubenswrapper[4995]: I0120 17:41:12.999129 4995 scope.go:117] "RemoveContainer" containerID="530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1" Jan 20 17:41:13 crc kubenswrapper[4995]: I0120 17:41:13.005967 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7lrj5"] Jan 20 17:41:13 crc kubenswrapper[4995]: I0120 17:41:13.027492 4995 scope.go:117] "RemoveContainer" containerID="7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2" Jan 20 17:41:13 crc kubenswrapper[4995]: E0120 17:41:13.028442 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2\": container with ID starting with 7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2 not found: ID does not exist" containerID="7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2" Jan 20 17:41:13 crc kubenswrapper[4995]: I0120 17:41:13.028490 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2"} err="failed to get container status \"7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2\": rpc error: code = NotFound desc = could not find container \"7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2\": container with ID starting with 7e6a4e08a05c89ce42740154e7cec79863359ac46004336551640f8b78a543e2 not found: ID does not exist" Jan 20 17:41:13 crc 
kubenswrapper[4995]: I0120 17:41:13.028521 4995 scope.go:117] "RemoveContainer" containerID="26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a" Jan 20 17:41:13 crc kubenswrapper[4995]: E0120 17:41:13.028872 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a\": container with ID starting with 26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a not found: ID does not exist" containerID="26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a" Jan 20 17:41:13 crc kubenswrapper[4995]: I0120 17:41:13.028980 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a"} err="failed to get container status \"26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a\": rpc error: code = NotFound desc = could not find container \"26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a\": container with ID starting with 26951d89416f2523ad21cbde68784895ec7c8b37c704e81c629005eafafd506a not found: ID does not exist" Jan 20 17:41:13 crc kubenswrapper[4995]: I0120 17:41:13.029109 4995 scope.go:117] "RemoveContainer" containerID="530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1" Jan 20 17:41:13 crc kubenswrapper[4995]: E0120 17:41:13.029492 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1\": container with ID starting with 530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1 not found: ID does not exist" containerID="530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1" Jan 20 17:41:13 crc kubenswrapper[4995]: I0120 17:41:13.029524 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1"} err="failed to get container status \"530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1\": rpc error: code = NotFound desc = could not find container \"530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1\": container with ID starting with 530d63bec27c4facc842b646586a6d6fbbc0ec9c320949d56300a7560c4db6f1 not found: ID does not exist" Jan 20 17:41:14 crc kubenswrapper[4995]: I0120 17:41:14.010837 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" path="/var/lib/kubelet/pods/337ca5c6-f52e-4fc8-abef-dc5777b61358/volumes" Jan 20 17:42:00 crc kubenswrapper[4995]: I0120 17:42:00.571259 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:42:00 crc kubenswrapper[4995]: I0120 17:42:00.571846 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:42:30 crc kubenswrapper[4995]: I0120 17:42:30.572564 4995 patch_prober.go:28] interesting 
pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:42:30 crc kubenswrapper[4995]: I0120 17:42:30.573599 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:43:00 crc kubenswrapper[4995]: I0120 17:43:00.571857 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:43:00 crc kubenswrapper[4995]: I0120 17:43:00.572404 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:43:00 crc kubenswrapper[4995]: I0120 17:43:00.572446 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:43:00 crc kubenswrapper[4995]: I0120 17:43:00.573265 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:43:00 crc kubenswrapper[4995]: I0120 17:43:00.573320 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" gracePeriod=600 Jan 20 17:43:00 crc kubenswrapper[4995]: E0120 17:43:00.704634 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:43:01 crc kubenswrapper[4995]: I0120 17:43:01.075858 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" exitCode=0 Jan 20 17:43:01 crc kubenswrapper[4995]: I0120 17:43:01.076232 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2"} Jan 20 17:43:01 crc kubenswrapper[4995]: 
I0120 17:43:01.076424 4995 scope.go:117] "RemoveContainer" containerID="774324e35d0bc3559af1e50e18189873e201a55ab4814c730585a5be26bc6dc5" Jan 20 17:43:01 crc kubenswrapper[4995]: I0120 17:43:01.077273 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:43:01 crc kubenswrapper[4995]: E0120 17:43:01.077752 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:43:13 crc kubenswrapper[4995]: I0120 17:43:13.991103 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:43:13 crc kubenswrapper[4995]: E0120 17:43:13.992204 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:43:24 crc kubenswrapper[4995]: I0120 17:43:24.989883 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:43:24 crc kubenswrapper[4995]: E0120 17:43:24.990922 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:43:38 crc kubenswrapper[4995]: I0120 17:43:38.989602 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:43:38 crc kubenswrapper[4995]: E0120 17:43:38.990485 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:43:53 crc kubenswrapper[4995]: I0120 17:43:53.989708 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:43:53 crc kubenswrapper[4995]: E0120 17:43:53.990590 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:44:04 crc kubenswrapper[4995]: I0120 
17:44:04.989917 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:44:04 crc kubenswrapper[4995]: E0120 17:44:04.990969 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:44:19 crc kubenswrapper[4995]: I0120 17:44:19.989581 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:44:19 crc kubenswrapper[4995]: E0120 17:44:19.990956 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:44:30 crc kubenswrapper[4995]: I0120 17:44:30.990468 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:44:30 crc kubenswrapper[4995]: E0120 17:44:30.991845 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:44:43 crc kubenswrapper[4995]: I0120 17:44:43.989966 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:44:43 crc kubenswrapper[4995]: E0120 17:44:43.991300 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.908786 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ksr2k"] Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909820 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909838 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909863 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909870 4995 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909893 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="extract-utilities" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909901 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="extract-utilities" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909925 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="extract-content" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909933 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="extract-content" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909942 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909950 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909963 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="extract-utilities" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909970 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="extract-utilities" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.909983 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="extract-utilities" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.909991 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="extract-utilities" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.910002 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="extract-content" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.910008 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="extract-content" Jan 20 17:44:51 crc kubenswrapper[4995]: E0120 17:44:51.910017 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="extract-content" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.910022 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="extract-content" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.910244 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="55a308f3-74f2-4b58-bf20-6e7ea000625f" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.910275 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="337ca5c6-f52e-4fc8-abef-dc5777b61358" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.910290 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a790de9-6a4b-4acb-9c99-5cd92ec6fff3" containerName="registry-server" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.912060 4995 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.919750 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksr2k"] Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.958496 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5fhs\" (UniqueName: \"kubernetes.io/projected/662b03cf-8721-47c7-b8d3-6911b439a701-kube-api-access-f5fhs\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.958582 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-catalog-content\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:51 crc kubenswrapper[4995]: I0120 17:44:51.958602 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-utilities\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 17:44:52.060441 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-catalog-content\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 17:44:52.060769 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-utilities\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 17:44:52.061045 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5fhs\" (UniqueName: \"kubernetes.io/projected/662b03cf-8721-47c7-b8d3-6911b439a701-kube-api-access-f5fhs\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 17:44:52.061239 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-utilities\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 17:44:52.061419 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-catalog-content\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 
17:44:52.239642 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5fhs\" (UniqueName: \"kubernetes.io/projected/662b03cf-8721-47c7-b8d3-6911b439a701-kube-api-access-f5fhs\") pod \"certified-operators-ksr2k\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:52 crc kubenswrapper[4995]: I0120 17:44:52.535942 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:44:53 crc kubenswrapper[4995]: I0120 17:44:53.023459 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ksr2k"] Jan 20 17:44:53 crc kubenswrapper[4995]: I0120 17:44:53.190790 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksr2k" event={"ID":"662b03cf-8721-47c7-b8d3-6911b439a701","Type":"ContainerStarted","Data":"8f6c8e33ef8d5e5c16e578889ce67c3dee12b0fdbd7215c3ba1a4fe2b7eaf4b3"} Jan 20 17:44:54 crc kubenswrapper[4995]: I0120 17:44:54.202126 4995 generic.go:334] "Generic (PLEG): container finished" podID="662b03cf-8721-47c7-b8d3-6911b439a701" containerID="16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e" exitCode=0 Jan 20 17:44:54 crc kubenswrapper[4995]: I0120 17:44:54.202170 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksr2k" event={"ID":"662b03cf-8721-47c7-b8d3-6911b439a701","Type":"ContainerDied","Data":"16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e"} Jan 20 17:44:56 crc kubenswrapper[4995]: I0120 17:44:56.222356 4995 generic.go:334] "Generic (PLEG): container finished" podID="662b03cf-8721-47c7-b8d3-6911b439a701" containerID="630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb" exitCode=0 Jan 20 17:44:56 crc kubenswrapper[4995]: I0120 17:44:56.222428 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksr2k" event={"ID":"662b03cf-8721-47c7-b8d3-6911b439a701","Type":"ContainerDied","Data":"630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb"} Jan 20 17:44:57 crc kubenswrapper[4995]: I0120 17:44:57.235921 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksr2k" event={"ID":"662b03cf-8721-47c7-b8d3-6911b439a701","Type":"ContainerStarted","Data":"141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078"} Jan 20 17:44:57 crc kubenswrapper[4995]: I0120 17:44:57.258976 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ksr2k" podStartSLOduration=3.716322221 podStartE2EDuration="6.258961004s" podCreationTimestamp="2026-01-20 17:44:51 +0000 UTC" firstStartedPulling="2026-01-20 17:44:54.203944269 +0000 UTC m=+4412.448549085" lastFinishedPulling="2026-01-20 17:44:56.746583062 +0000 UTC m=+4414.991187868" observedRunningTime="2026-01-20 17:44:57.256761795 +0000 UTC m=+4415.501366621" watchObservedRunningTime="2026-01-20 17:44:57.258961004 +0000 UTC m=+4415.503565810" Jan 20 17:44:57 crc kubenswrapper[4995]: I0120 17:44:57.990745 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:44:57 crc kubenswrapper[4995]: E0120 17:44:57.991310 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.194416 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs"] Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.195983 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.199611 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.199918 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.215608 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs"] Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.236051 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfnqc\" (UniqueName: \"kubernetes.io/projected/d4039f52-226d-493f-9d76-5c92ccaba556-kube-api-access-hfnqc\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.236256 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4039f52-226d-493f-9d76-5c92ccaba556-secret-volume\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.236382 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4039f52-226d-493f-9d76-5c92ccaba556-config-volume\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.338615 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4039f52-226d-493f-9d76-5c92ccaba556-secret-volume\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.338697 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4039f52-226d-493f-9d76-5c92ccaba556-config-volume\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.338894 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hfnqc\" (UniqueName: \"kubernetes.io/projected/d4039f52-226d-493f-9d76-5c92ccaba556-kube-api-access-hfnqc\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.339655 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4039f52-226d-493f-9d76-5c92ccaba556-config-volume\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.354006 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4039f52-226d-493f-9d76-5c92ccaba556-secret-volume\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.362925 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfnqc\" (UniqueName: \"kubernetes.io/projected/d4039f52-226d-493f-9d76-5c92ccaba556-kube-api-access-hfnqc\") pod \"collect-profiles-29482185-lwcjs\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.516816 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:00 crc kubenswrapper[4995]: I0120 17:45:00.985626 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs"] Jan 20 17:45:01 crc kubenswrapper[4995]: I0120 17:45:01.279509 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" event={"ID":"d4039f52-226d-493f-9d76-5c92ccaba556","Type":"ContainerStarted","Data":"91b7c90b4414bdb0bdf4e592897e59bb055fe09877c0b12b675e6dd439592e23"} Jan 20 17:45:02 crc kubenswrapper[4995]: I0120 17:45:02.300007 4995 generic.go:334] "Generic (PLEG): container finished" podID="d4039f52-226d-493f-9d76-5c92ccaba556" containerID="42aa0d3da87eb12682e5fe21640d8f2df983b6ae5a0ecb5c7d3bdefc3bd35a32" exitCode=0 Jan 20 17:45:02 crc kubenswrapper[4995]: I0120 17:45:02.300169 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" event={"ID":"d4039f52-226d-493f-9d76-5c92ccaba556","Type":"ContainerDied","Data":"42aa0d3da87eb12682e5fe21640d8f2df983b6ae5a0ecb5c7d3bdefc3bd35a32"} Jan 20 17:45:02 crc kubenswrapper[4995]: I0120 17:45:02.536499 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:45:02 crc kubenswrapper[4995]: I0120 17:45:02.536845 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:45:02 crc kubenswrapper[4995]: I0120 17:45:02.582742 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:45:03 crc kubenswrapper[4995]: 
I0120 17:45:03.372983 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:45:03 crc kubenswrapper[4995]: I0120 17:45:03.433732 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksr2k"] Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.647018 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.726295 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfnqc\" (UniqueName: \"kubernetes.io/projected/d4039f52-226d-493f-9d76-5c92ccaba556-kube-api-access-hfnqc\") pod \"d4039f52-226d-493f-9d76-5c92ccaba556\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.726436 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4039f52-226d-493f-9d76-5c92ccaba556-config-volume\") pod \"d4039f52-226d-493f-9d76-5c92ccaba556\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.726515 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4039f52-226d-493f-9d76-5c92ccaba556-secret-volume\") pod \"d4039f52-226d-493f-9d76-5c92ccaba556\" (UID: \"d4039f52-226d-493f-9d76-5c92ccaba556\") " Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.728929 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4039f52-226d-493f-9d76-5c92ccaba556-config-volume" (OuterVolumeSpecName: "config-volume") pod "d4039f52-226d-493f-9d76-5c92ccaba556" (UID: "d4039f52-226d-493f-9d76-5c92ccaba556"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.733190 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4039f52-226d-493f-9d76-5c92ccaba556-kube-api-access-hfnqc" (OuterVolumeSpecName: "kube-api-access-hfnqc") pod "d4039f52-226d-493f-9d76-5c92ccaba556" (UID: "d4039f52-226d-493f-9d76-5c92ccaba556"). InnerVolumeSpecName "kube-api-access-hfnqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.744140 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4039f52-226d-493f-9d76-5c92ccaba556-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d4039f52-226d-493f-9d76-5c92ccaba556" (UID: "d4039f52-226d-493f-9d76-5c92ccaba556"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.827532 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d4039f52-226d-493f-9d76-5c92ccaba556-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.827567 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfnqc\" (UniqueName: \"kubernetes.io/projected/d4039f52-226d-493f-9d76-5c92ccaba556-kube-api-access-hfnqc\") on node \"crc\" DevicePath \"\"" Jan 20 17:45:04 crc kubenswrapper[4995]: I0120 17:45:04.827578 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d4039f52-226d-493f-9d76-5c92ccaba556-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 17:45:05 crc kubenswrapper[4995]: I0120 17:45:05.400862 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" event={"ID":"d4039f52-226d-493f-9d76-5c92ccaba556","Type":"ContainerDied","Data":"91b7c90b4414bdb0bdf4e592897e59bb055fe09877c0b12b675e6dd439592e23"} Jan 20 17:45:05 crc kubenswrapper[4995]: I0120 17:45:05.401257 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91b7c90b4414bdb0bdf4e592897e59bb055fe09877c0b12b675e6dd439592e23" Jan 20 17:45:05 crc kubenswrapper[4995]: I0120 17:45:05.401145 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ksr2k" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="registry-server" containerID="cri-o://141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078" gracePeriod=2 Jan 20 17:45:05 crc kubenswrapper[4995]: I0120 17:45:05.400880 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs" Jan 20 17:45:05 crc kubenswrapper[4995]: I0120 17:45:05.727795 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"] Jan 20 17:45:05 crc kubenswrapper[4995]: I0120 17:45:05.738548 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482140-f7tf9"] Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.001912 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee" path="/var/lib/kubelet/pods/01850a82-5bc4-45d5-9d82-3d4b1f1ab8ee/volumes" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.411045 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.416216 4995 generic.go:334] "Generic (PLEG): container finished" podID="662b03cf-8721-47c7-b8d3-6911b439a701" containerID="141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078" exitCode=0 Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.416275 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksr2k" event={"ID":"662b03cf-8721-47c7-b8d3-6911b439a701","Type":"ContainerDied","Data":"141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078"} Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.416314 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ksr2k" event={"ID":"662b03cf-8721-47c7-b8d3-6911b439a701","Type":"ContainerDied","Data":"8f6c8e33ef8d5e5c16e578889ce67c3dee12b0fdbd7215c3ba1a4fe2b7eaf4b3"} Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.416348 4995 scope.go:117] "RemoveContainer" containerID="141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.449668 4995 scope.go:117] "RemoveContainer" containerID="630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.458519 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-utilities\") pod \"662b03cf-8721-47c7-b8d3-6911b439a701\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.458675 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-catalog-content\") pod \"662b03cf-8721-47c7-b8d3-6911b439a701\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.458776 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f5fhs\" (UniqueName: \"kubernetes.io/projected/662b03cf-8721-47c7-b8d3-6911b439a701-kube-api-access-f5fhs\") pod \"662b03cf-8721-47c7-b8d3-6911b439a701\" (UID: \"662b03cf-8721-47c7-b8d3-6911b439a701\") " Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.460723 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-utilities" (OuterVolumeSpecName: "utilities") pod "662b03cf-8721-47c7-b8d3-6911b439a701" (UID: "662b03cf-8721-47c7-b8d3-6911b439a701"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.470410 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/662b03cf-8721-47c7-b8d3-6911b439a701-kube-api-access-f5fhs" (OuterVolumeSpecName: "kube-api-access-f5fhs") pod "662b03cf-8721-47c7-b8d3-6911b439a701" (UID: "662b03cf-8721-47c7-b8d3-6911b439a701"). InnerVolumeSpecName "kube-api-access-f5fhs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.499420 4995 scope.go:117] "RemoveContainer" containerID="16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.509705 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "662b03cf-8721-47c7-b8d3-6911b439a701" (UID: "662b03cf-8721-47c7-b8d3-6911b439a701"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.562321 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.562362 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/662b03cf-8721-47c7-b8d3-6911b439a701-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.562376 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f5fhs\" (UniqueName: \"kubernetes.io/projected/662b03cf-8721-47c7-b8d3-6911b439a701-kube-api-access-f5fhs\") on node \"crc\" DevicePath \"\"" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.569978 4995 scope.go:117] "RemoveContainer" containerID="141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078" Jan 20 17:45:06 crc kubenswrapper[4995]: E0120 17:45:06.570602 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078\": container with ID starting with 141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078 not found: ID does not exist" containerID="141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.570649 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078"} err="failed to get container status \"141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078\": rpc error: code = NotFound desc = could not find container \"141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078\": container with ID starting with 141f6fe5856c290286138f98871c7ab8969c4ec69c4d18649664b6a51e161078 not found: ID does not exist" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.570680 4995 scope.go:117] "RemoveContainer" containerID="630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb" Jan 20 17:45:06 crc kubenswrapper[4995]: E0120 17:45:06.570969 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb\": container with ID starting with 630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb not found: ID does not exist" containerID="630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.571000 4995 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb"} err="failed to get container status \"630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb\": rpc error: code = NotFound desc = could not find container \"630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb\": container with ID starting with 630077668cd649b71a6b9203c5fe662a211bedc71a50d799cfaadbad511229cb not found: ID does not exist" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.571023 4995 scope.go:117] "RemoveContainer" containerID="16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e" Jan 20 17:45:06 crc kubenswrapper[4995]: E0120 17:45:06.571535 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e\": container with ID starting with 16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e not found: ID does not exist" containerID="16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e" Jan 20 17:45:06 crc kubenswrapper[4995]: I0120 17:45:06.571570 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e"} err="failed to get container status \"16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e\": rpc error: code = NotFound desc = could not find container \"16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e\": container with ID starting with 16c6ed4d0cb2b0069da13054da0d226b642d58d2a98df96abc57c756a495045e not found: ID does not exist" Jan 20 17:45:07 crc kubenswrapper[4995]: I0120 17:45:07.429272 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ksr2k" Jan 20 17:45:07 crc kubenswrapper[4995]: I0120 17:45:07.467848 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ksr2k"] Jan 20 17:45:07 crc kubenswrapper[4995]: I0120 17:45:07.479109 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ksr2k"] Jan 20 17:45:08 crc kubenswrapper[4995]: I0120 17:45:08.002792 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" path="/var/lib/kubelet/pods/662b03cf-8721-47c7-b8d3-6911b439a701/volumes" Jan 20 17:45:12 crc kubenswrapper[4995]: I0120 17:45:12.989444 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:45:12 crc kubenswrapper[4995]: E0120 17:45:12.990176 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:45:25 crc kubenswrapper[4995]: I0120 17:45:25.989998 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:45:25 crc kubenswrapper[4995]: E0120 17:45:25.990732 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:45:34 crc kubenswrapper[4995]: I0120 17:45:34.949770 4995 scope.go:117] "RemoveContainer" containerID="a0440b2bc16b41337f9949292a83a62e98dba09f273ab8edcb2663c4f44eff58" Jan 20 17:45:39 crc kubenswrapper[4995]: I0120 17:45:39.989555 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:45:39 crc kubenswrapper[4995]: E0120 17:45:39.990522 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:45:54 crc kubenswrapper[4995]: I0120 17:45:54.989614 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:45:54 crc kubenswrapper[4995]: E0120 17:45:54.990588 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 
17:46:06 crc kubenswrapper[4995]: I0120 17:46:06.990289 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:46:06 crc kubenswrapper[4995]: E0120 17:46:06.991616 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:46:18 crc kubenswrapper[4995]: I0120 17:46:18.990317 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:46:18 crc kubenswrapper[4995]: E0120 17:46:18.991513 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:46:30 crc kubenswrapper[4995]: I0120 17:46:30.989253 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:46:30 crc kubenswrapper[4995]: E0120 17:46:30.990019 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:46:42 crc kubenswrapper[4995]: I0120 17:46:42.989550 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:46:42 crc kubenswrapper[4995]: E0120 17:46:42.990490 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:46:56 crc kubenswrapper[4995]: I0120 17:46:55.999404 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:46:56 crc kubenswrapper[4995]: E0120 17:46:56.000455 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:47:10 crc kubenswrapper[4995]: I0120 17:47:10.989812 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:47:10 crc 
kubenswrapper[4995]: E0120 17:47:10.990718 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:47:24 crc kubenswrapper[4995]: I0120 17:47:24.990045 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:47:24 crc kubenswrapper[4995]: E0120 17:47:24.991002 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:47:36 crc kubenswrapper[4995]: I0120 17:47:36.990039 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:47:36 crc kubenswrapper[4995]: E0120 17:47:36.990982 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:47:48 crc kubenswrapper[4995]: I0120 17:47:48.989712 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:47:48 crc kubenswrapper[4995]: E0120 17:47:48.990682 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:47:59 crc kubenswrapper[4995]: I0120 17:47:59.990481 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:47:59 crc kubenswrapper[4995]: E0120 17:47:59.991468 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:48:14 crc kubenswrapper[4995]: I0120 17:48:14.990514 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:48:16 crc kubenswrapper[4995]: I0120 17:48:16.371391 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"e00a298fac5aa085cd546b6a0e6d19c6993a6759286f5bb3971e40447f12970f"} Jan 20 17:50:30 crc kubenswrapper[4995]: I0120 17:50:30.571609 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:50:30 crc kubenswrapper[4995]: I0120 17:50:30.572521 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:51:00 crc kubenswrapper[4995]: I0120 17:51:00.572231 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:51:00 crc kubenswrapper[4995]: I0120 17:51:00.572918 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.204540 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xbjrz"] Jan 20 17:51:26 crc kubenswrapper[4995]: E0120 17:51:26.205928 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="registry-server" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.205947 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="registry-server" Jan 20 17:51:26 crc kubenswrapper[4995]: E0120 17:51:26.205969 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="extract-utilities" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.205979 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="extract-utilities" Jan 20 17:51:26 crc kubenswrapper[4995]: E0120 17:51:26.205994 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="extract-content" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.206003 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="extract-content" Jan 20 17:51:26 crc kubenswrapper[4995]: E0120 17:51:26.206029 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4039f52-226d-493f-9d76-5c92ccaba556" containerName="collect-profiles" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.206036 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4039f52-226d-493f-9d76-5c92ccaba556" containerName="collect-profiles" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.206298 4995 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d4039f52-226d-493f-9d76-5c92ccaba556" containerName="collect-profiles" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.206315 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="662b03cf-8721-47c7-b8d3-6911b439a701" containerName="registry-server" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.211428 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.247868 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbjrz"] Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.279880 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-catalog-content\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.280267 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-utilities\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.280400 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76dxf\" (UniqueName: \"kubernetes.io/projected/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-kube-api-access-76dxf\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.381834 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-utilities\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.382373 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-utilities\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.382542 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76dxf\" (UniqueName: \"kubernetes.io/projected/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-kube-api-access-76dxf\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.382993 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-catalog-content\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.383343 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-catalog-content\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.410517 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76dxf\" (UniqueName: \"kubernetes.io/projected/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-kube-api-access-76dxf\") pod \"redhat-marketplace-xbjrz\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:26 crc kubenswrapper[4995]: I0120 17:51:26.560857 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:27 crc kubenswrapper[4995]: I0120 17:51:27.044797 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbjrz"] Jan 20 17:51:28 crc kubenswrapper[4995]: I0120 17:51:28.480276 4995 generic.go:334] "Generic (PLEG): container finished" podID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerID="3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63" exitCode=0 Jan 20 17:51:28 crc kubenswrapper[4995]: I0120 17:51:28.480383 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerDied","Data":"3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63"} Jan 20 17:51:28 crc kubenswrapper[4995]: I0120 17:51:28.483381 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerStarted","Data":"93a5991a732e11b6672c314f9e3c24d00707b089ac7f849d2d10cfd232f194f7"} Jan 20 17:51:28 crc kubenswrapper[4995]: I0120 17:51:28.483320 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 17:51:29 crc kubenswrapper[4995]: I0120 17:51:29.492716 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerStarted","Data":"87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01"} Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.513818 4995 generic.go:334] "Generic (PLEG): container finished" podID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerID="87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01" exitCode=0 Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.513861 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerDied","Data":"87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01"} Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.571735 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.571830 4995 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.571903 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.572991 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e00a298fac5aa085cd546b6a0e6d19c6993a6759286f5bb3971e40447f12970f"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 17:51:30 crc kubenswrapper[4995]: I0120 17:51:30.573191 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://e00a298fac5aa085cd546b6a0e6d19c6993a6759286f5bb3971e40447f12970f" gracePeriod=600 Jan 20 17:51:31 crc kubenswrapper[4995]: I0120 17:51:31.526170 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerStarted","Data":"c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b"} Jan 20 17:51:31 crc kubenswrapper[4995]: I0120 17:51:31.529467 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="e00a298fac5aa085cd546b6a0e6d19c6993a6759286f5bb3971e40447f12970f" exitCode=0 Jan 20 17:51:31 crc kubenswrapper[4995]: I0120 17:51:31.529491 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"e00a298fac5aa085cd546b6a0e6d19c6993a6759286f5bb3971e40447f12970f"} Jan 20 17:51:31 crc kubenswrapper[4995]: I0120 17:51:31.529524 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"} Jan 20 17:51:31 crc kubenswrapper[4995]: I0120 17:51:31.529542 4995 scope.go:117] "RemoveContainer" containerID="3947198fcce0915f8aee07cc30fb4ecbb305e596bb8ef1218e59ffa027a974a2" Jan 20 17:51:31 crc kubenswrapper[4995]: I0120 17:51:31.553339 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xbjrz" podStartSLOduration=3.094026613 podStartE2EDuration="5.55331645s" podCreationTimestamp="2026-01-20 17:51:26 +0000 UTC" firstStartedPulling="2026-01-20 17:51:28.482910055 +0000 UTC m=+4806.727514891" lastFinishedPulling="2026-01-20 17:51:30.942199922 +0000 UTC m=+4809.186804728" observedRunningTime="2026-01-20 17:51:31.550903905 +0000 UTC m=+4809.795508731" watchObservedRunningTime="2026-01-20 17:51:31.55331645 +0000 UTC m=+4809.797921266" Jan 20 17:51:36 crc kubenswrapper[4995]: I0120 17:51:36.562137 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:36 crc kubenswrapper[4995]: I0120 17:51:36.562844 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:36 crc kubenswrapper[4995]: I0120 17:51:36.633726 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:36 crc kubenswrapper[4995]: I0120 17:51:36.695373 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:36 crc kubenswrapper[4995]: I0120 17:51:36.878664 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbjrz"] Jan 20 17:51:38 crc kubenswrapper[4995]: I0120 17:51:38.602297 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xbjrz" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="registry-server" containerID="cri-o://c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b" gracePeriod=2 Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.133011 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.255263 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-catalog-content\") pod \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.255623 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76dxf\" (UniqueName: \"kubernetes.io/projected/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-kube-api-access-76dxf\") pod \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.255683 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-utilities\") pod \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\" (UID: \"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16\") " Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.256770 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-utilities" (OuterVolumeSpecName: "utilities") pod "5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" (UID: "5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.265462 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-kube-api-access-76dxf" (OuterVolumeSpecName: "kube-api-access-76dxf") pod "5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" (UID: "5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16"). InnerVolumeSpecName "kube-api-access-76dxf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.277911 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" (UID: "5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.358910 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.358965 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76dxf\" (UniqueName: \"kubernetes.io/projected/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-kube-api-access-76dxf\") on node \"crc\" DevicePath \"\"" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.358984 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.615878 4995 generic.go:334] "Generic (PLEG): container finished" podID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerID="c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b" exitCode=0 Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.615938 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xbjrz" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.615955 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerDied","Data":"c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b"} Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.616005 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xbjrz" event={"ID":"5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16","Type":"ContainerDied","Data":"93a5991a732e11b6672c314f9e3c24d00707b089ac7f849d2d10cfd232f194f7"} Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.616025 4995 scope.go:117] "RemoveContainer" containerID="c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.646450 4995 scope.go:117] "RemoveContainer" containerID="87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.661290 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbjrz"] Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.685929 4995 scope.go:117] "RemoveContainer" containerID="3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.708360 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xbjrz"] Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.747803 4995 scope.go:117] "RemoveContainer" containerID="c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b" Jan 20 17:51:39 crc kubenswrapper[4995]: E0120 17:51:39.748331 4995 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b\": container with ID starting with c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b not found: ID does not exist" containerID="c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.748387 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b"} err="failed to get container status \"c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b\": rpc error: code = NotFound desc = could not find container \"c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b\": container with ID starting with c06e4a2c48d1255212c4703106aee8534d7dd6a9d034a767c9bda70e4429bf6b not found: ID does not exist" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.748419 4995 scope.go:117] "RemoveContainer" containerID="87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01" Jan 20 17:51:39 crc kubenswrapper[4995]: E0120 17:51:39.748918 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01\": container with ID starting with 87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01 not found: ID does not exist" containerID="87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.748971 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01"} err="failed to get container status \"87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01\": rpc error: code = NotFound desc = could not find container \"87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01\": container with ID starting with 87286e20df7e8583e53b5d3d6bec8fb5b080295b77b5311d54371b11357b0a01 not found: ID does not exist" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.749005 4995 scope.go:117] "RemoveContainer" containerID="3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63" Jan 20 17:51:39 crc kubenswrapper[4995]: E0120 17:51:39.749473 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63\": container with ID starting with 3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63 not found: ID does not exist" containerID="3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63" Jan 20 17:51:39 crc kubenswrapper[4995]: I0120 17:51:39.749501 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63"} err="failed to get container status \"3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63\": rpc error: code = NotFound desc = could not find container \"3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63\": container with ID starting with 3b216b691c8304de0205cd46f407b65ff885f82694b6243dea5130ad6304ba63 not found: ID does not exist" Jan 20 17:51:40 crc kubenswrapper[4995]: I0120 17:51:40.000155 4995 kubelet_volumes.go:163] "Cleaned 
Jan 20 17:51:40 crc kubenswrapper[4995]: I0120 17:51:40.000155 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" path="/var/lib/kubelet/pods/5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16/volumes"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.858454 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x8bmt"]
Jan 20 17:52:24 crc kubenswrapper[4995]: E0120 17:52:24.859619 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="registry-server"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.859637 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="registry-server"
Jan 20 17:52:24 crc kubenswrapper[4995]: E0120 17:52:24.859670 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="extract-utilities"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.859678 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="extract-utilities"
Jan 20 17:52:24 crc kubenswrapper[4995]: E0120 17:52:24.859706 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="extract-content"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.859715 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="extract-content"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.859959 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="5303b0c9-cb29-4bc0-a5b0-accf7eaf5a16" containerName="registry-server"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.861719 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:24 crc kubenswrapper[4995]: I0120 17:52:24.877814 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8bmt"]
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.017609 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frvj8\" (UniqueName: \"kubernetes.io/projected/d50abdf3-9c50-49ea-b698-4cc83fdd524e-kube-api-access-frvj8\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.017791 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-catalog-content\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.017860 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-utilities\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.119608 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frvj8\" (UniqueName: \"kubernetes.io/projected/d50abdf3-9c50-49ea-b698-4cc83fdd524e-kube-api-access-frvj8\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.119992 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-catalog-content\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.120037 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-utilities\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.120688 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-utilities\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.120877 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-catalog-content\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.141928 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frvj8\" (UniqueName: \"kubernetes.io/projected/d50abdf3-9c50-49ea-b698-4cc83fdd524e-kube-api-access-frvj8\") pod \"community-operators-x8bmt\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") " pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.189562 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:25 crc kubenswrapper[4995]: I0120 17:52:25.706598 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x8bmt"]
Jan 20 17:52:26 crc kubenswrapper[4995]: I0120 17:52:26.087967 4995 generic.go:334] "Generic (PLEG): container finished" podID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerID="14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a" exitCode=0
Jan 20 17:52:26 crc kubenswrapper[4995]: I0120 17:52:26.088058 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerDied","Data":"14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a"}
Jan 20 17:52:26 crc kubenswrapper[4995]: I0120 17:52:26.088317 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerStarted","Data":"e199d8aa00cbd0d6808e082e3abbcb8a87eaf40c2b07db47a8f75cd8ff6830af"}
Jan 20 17:52:27 crc kubenswrapper[4995]: I0120 17:52:27.104351 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerStarted","Data":"5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa"}
Jan 20 17:52:28 crc kubenswrapper[4995]: I0120 17:52:28.121749 4995 generic.go:334] "Generic (PLEG): container finished" podID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerID="5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa" exitCode=0
Jan 20 17:52:28 crc kubenswrapper[4995]: I0120 17:52:28.121821 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerDied","Data":"5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa"}
Jan 20 17:52:29 crc kubenswrapper[4995]: I0120 17:52:29.139374 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerStarted","Data":"2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391"}
Jan 20 17:52:29 crc kubenswrapper[4995]: I0120 17:52:29.168812 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x8bmt" podStartSLOduration=2.667200999 podStartE2EDuration="5.168794255s" podCreationTimestamp="2026-01-20 17:52:24 +0000 UTC" firstStartedPulling="2026-01-20 17:52:26.090513976 +0000 UTC m=+4864.335118782" lastFinishedPulling="2026-01-20 17:52:28.592107182 +0000 UTC m=+4866.836712038" observedRunningTime="2026-01-20 17:52:29.160357106 +0000 UTC m=+4867.404961922" watchObservedRunningTime="2026-01-20 17:52:29.168794255 +0000 UTC m=+4867.413399061"
pods=["openshift-marketplace/redhat-operators-s9mdw"] Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.627099 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.640536 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s9mdw"] Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.736176 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-utilities\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.736312 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmzkh\" (UniqueName: \"kubernetes.io/projected/27d4dd2c-e607-46b9-a297-f29ed44caf4f-kube-api-access-qmzkh\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.736482 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-catalog-content\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.838033 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-catalog-content\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.838122 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-utilities\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.838197 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmzkh\" (UniqueName: \"kubernetes.io/projected/27d4dd2c-e607-46b9-a297-f29ed44caf4f-kube-api-access-qmzkh\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.838538 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-catalog-content\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.838709 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-utilities\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " 
pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.872676 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmzkh\" (UniqueName: \"kubernetes.io/projected/27d4dd2c-e607-46b9-a297-f29ed44caf4f-kube-api-access-qmzkh\") pod \"redhat-operators-s9mdw\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") " pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:30 crc kubenswrapper[4995]: I0120 17:52:30.971631 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s9mdw" Jan 20 17:52:31 crc kubenswrapper[4995]: I0120 17:52:31.494703 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s9mdw"] Jan 20 17:52:32 crc kubenswrapper[4995]: I0120 17:52:32.184770 4995 generic.go:334] "Generic (PLEG): container finished" podID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerID="b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f" exitCode=0 Jan 20 17:52:32 crc kubenswrapper[4995]: I0120 17:52:32.184907 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerDied","Data":"b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f"} Jan 20 17:52:32 crc kubenswrapper[4995]: I0120 17:52:32.185235 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerStarted","Data":"fde8c174c3d9d540ce606bbdf00fde303fd4e1ad57807b44af74a4b1436dddf6"} Jan 20 17:52:34 crc kubenswrapper[4995]: I0120 17:52:34.209888 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerStarted","Data":"fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b"} Jan 20 17:52:35 crc kubenswrapper[4995]: I0120 17:52:35.190500 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x8bmt" Jan 20 17:52:35 crc kubenswrapper[4995]: I0120 17:52:35.191051 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x8bmt" Jan 20 17:52:35 crc kubenswrapper[4995]: I0120 17:52:35.263101 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x8bmt" Jan 20 17:52:35 crc kubenswrapper[4995]: I0120 17:52:35.317356 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x8bmt" Jan 20 17:52:36 crc kubenswrapper[4995]: I0120 17:52:36.249263 4995 generic.go:334] "Generic (PLEG): container finished" podID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerID="fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b" exitCode=0 Jan 20 17:52:36 crc kubenswrapper[4995]: I0120 17:52:36.249348 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerDied","Data":"fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b"} Jan 20 17:52:36 crc kubenswrapper[4995]: I0120 17:52:36.414399 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8bmt"] Jan 20 17:52:37 crc 
Jan 20 17:52:37 crc kubenswrapper[4995]: I0120 17:52:37.260139 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x8bmt" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="registry-server" containerID="cri-o://2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391" gracePeriod=2
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.002461 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.105347 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-catalog-content\") pod \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") "
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.105815 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-utilities\") pod \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") "
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.105990 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frvj8\" (UniqueName: \"kubernetes.io/projected/d50abdf3-9c50-49ea-b698-4cc83fdd524e-kube-api-access-frvj8\") pod \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\" (UID: \"d50abdf3-9c50-49ea-b698-4cc83fdd524e\") "
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.106486 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-utilities" (OuterVolumeSpecName: "utilities") pod "d50abdf3-9c50-49ea-b698-4cc83fdd524e" (UID: "d50abdf3-9c50-49ea-b698-4cc83fdd524e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.106833 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.111436 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d50abdf3-9c50-49ea-b698-4cc83fdd524e-kube-api-access-frvj8" (OuterVolumeSpecName: "kube-api-access-frvj8") pod "d50abdf3-9c50-49ea-b698-4cc83fdd524e" (UID: "d50abdf3-9c50-49ea-b698-4cc83fdd524e"). InnerVolumeSpecName "kube-api-access-frvj8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.166226 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d50abdf3-9c50-49ea-b698-4cc83fdd524e" (UID: "d50abdf3-9c50-49ea-b698-4cc83fdd524e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.208920 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frvj8\" (UniqueName: \"kubernetes.io/projected/d50abdf3-9c50-49ea-b698-4cc83fdd524e-kube-api-access-frvj8\") on node \"crc\" DevicePath \"\""
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.208989 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d50abdf3-9c50-49ea-b698-4cc83fdd524e-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.275274 4995 generic.go:334] "Generic (PLEG): container finished" podID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerID="2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391" exitCode=0
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.275328 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerDied","Data":"2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391"}
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.275695 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x8bmt" event={"ID":"d50abdf3-9c50-49ea-b698-4cc83fdd524e","Type":"ContainerDied","Data":"e199d8aa00cbd0d6808e082e3abbcb8a87eaf40c2b07db47a8f75cd8ff6830af"}
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.275378 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x8bmt"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.275731 4995 scope.go:117] "RemoveContainer" containerID="2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.278978 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerStarted","Data":"e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25"}
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.305885 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s9mdw" podStartSLOduration=3.77625797 podStartE2EDuration="8.305868698s" podCreationTimestamp="2026-01-20 17:52:30 +0000 UTC" firstStartedPulling="2026-01-20 17:52:32.186852339 +0000 UTC m=+4870.431457155" lastFinishedPulling="2026-01-20 17:52:36.716463057 +0000 UTC m=+4874.961067883" observedRunningTime="2026-01-20 17:52:38.304585322 +0000 UTC m=+4876.549190138" watchObservedRunningTime="2026-01-20 17:52:38.305868698 +0000 UTC m=+4876.550473514"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.316509 4995 scope.go:117] "RemoveContainer" containerID="5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.335284 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x8bmt"]
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.350507 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x8bmt"]
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.359457 4995 scope.go:117] "RemoveContainer" containerID="14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.379398 4995 scope.go:117] "RemoveContainer" containerID="2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391"
Jan 20 17:52:38 crc kubenswrapper[4995]: E0120 17:52:38.379791 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391\": container with ID starting with 2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391 not found: ID does not exist" containerID="2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.379825 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391"} err="failed to get container status \"2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391\": rpc error: code = NotFound desc = could not find container \"2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391\": container with ID starting with 2a150b701e1d02f69213d2b89f83702994acce06bb45f87d7cef60a08a11a391 not found: ID does not exist"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.379852 4995 scope.go:117] "RemoveContainer" containerID="5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa"
Jan 20 17:52:38 crc kubenswrapper[4995]: E0120 17:52:38.380268 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa\": container with ID starting with 5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa not found: ID does not exist" containerID="5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.380292 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa"} err="failed to get container status \"5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa\": rpc error: code = NotFound desc = could not find container \"5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa\": container with ID starting with 5ed99187035a5ac104e41dd348d4675f97865a94751e101b984c990590751afa not found: ID does not exist"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.380309 4995 scope.go:117] "RemoveContainer" containerID="14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a"
Jan 20 17:52:38 crc kubenswrapper[4995]: E0120 17:52:38.380676 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a\": container with ID starting with 14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a not found: ID does not exist" containerID="14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a"
Jan 20 17:52:38 crc kubenswrapper[4995]: I0120 17:52:38.380700 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a"} err="failed to get container status \"14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a\": rpc error: code = NotFound desc = could not find container \"14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a\": container with ID starting with 14fbdb72c6810438ab4cf27f693ab3af801d606bc95f56937198e775332acd8a not found: ID does not exist"
Jan 20 17:52:40 crc kubenswrapper[4995]: I0120 17:52:40.002627 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" path="/var/lib/kubelet/pods/d50abdf3-9c50-49ea-b698-4cc83fdd524e/volumes"
Jan 20 17:52:40 crc kubenswrapper[4995]: I0120 17:52:40.971950 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s9mdw"
Jan 20 17:52:40 crc kubenswrapper[4995]: I0120 17:52:40.972018 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s9mdw"
Jan 20 17:52:42 crc kubenswrapper[4995]: I0120 17:52:42.033469 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-s9mdw" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="registry-server" probeResult="failure" output=<
Jan 20 17:52:42 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s
Jan 20 17:52:42 crc kubenswrapper[4995]: >
Jan 20 17:52:51 crc kubenswrapper[4995]: I0120 17:52:51.053807 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s9mdw"
Jan 20 17:52:51 crc kubenswrapper[4995]: I0120 17:52:51.117754 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s9mdw"
Jan 20 17:52:51 crc kubenswrapper[4995]: I0120 17:52:51.291409 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s9mdw"]
Jan 20 17:52:52 crc kubenswrapper[4995]: I0120 17:52:52.437943 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s9mdw" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="registry-server" containerID="cri-o://e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25" gracePeriod=2
Jan 20 17:52:52 crc kubenswrapper[4995]: I0120 17:52:52.957150 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s9mdw"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.127925 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-utilities\") pod \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") "
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.128108 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmzkh\" (UniqueName: \"kubernetes.io/projected/27d4dd2c-e607-46b9-a297-f29ed44caf4f-kube-api-access-qmzkh\") pod \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") "
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.128187 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-catalog-content\") pod \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\" (UID: \"27d4dd2c-e607-46b9-a297-f29ed44caf4f\") "
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.129127 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-utilities" (OuterVolumeSpecName: "utilities") pod "27d4dd2c-e607-46b9-a297-f29ed44caf4f" (UID: "27d4dd2c-e607-46b9-a297-f29ed44caf4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.136397 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27d4dd2c-e607-46b9-a297-f29ed44caf4f-kube-api-access-qmzkh" (OuterVolumeSpecName: "kube-api-access-qmzkh") pod "27d4dd2c-e607-46b9-a297-f29ed44caf4f" (UID: "27d4dd2c-e607-46b9-a297-f29ed44caf4f"). InnerVolumeSpecName "kube-api-access-qmzkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.231297 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.231632 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmzkh\" (UniqueName: \"kubernetes.io/projected/27d4dd2c-e607-46b9-a297-f29ed44caf4f-kube-api-access-qmzkh\") on node \"crc\" DevicePath \"\""
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.280922 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "27d4dd2c-e607-46b9-a297-f29ed44caf4f" (UID: "27d4dd2c-e607-46b9-a297-f29ed44caf4f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.333785 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27d4dd2c-e607-46b9-a297-f29ed44caf4f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.452160 4995 generic.go:334] "Generic (PLEG): container finished" podID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerID="e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25" exitCode=0
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.452221 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerDied","Data":"e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25"}
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.452265 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s9mdw" event={"ID":"27d4dd2c-e607-46b9-a297-f29ed44caf4f","Type":"ContainerDied","Data":"fde8c174c3d9d540ce606bbdf00fde303fd4e1ad57807b44af74a4b1436dddf6"}
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.452296 4995 scope.go:117] "RemoveContainer" containerID="e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.452494 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s9mdw"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.501346 4995 scope.go:117] "RemoveContainer" containerID="fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.507932 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s9mdw"]
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.526450 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s9mdw"]
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.540408 4995 scope.go:117] "RemoveContainer" containerID="b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.598536 4995 scope.go:117] "RemoveContainer" containerID="e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25"
Jan 20 17:52:53 crc kubenswrapper[4995]: E0120 17:52:53.598919 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25\": container with ID starting with e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25 not found: ID does not exist" containerID="e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.599045 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25"} err="failed to get container status \"e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25\": rpc error: code = NotFound desc = could not find container \"e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25\": container with ID starting with e03aabcd2bfc83c0388f24c7cfdb5657b059312c7c181e3f8e003adb789c3f25 not found: ID does not exist"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.599209 4995 scope.go:117] "RemoveContainer" containerID="fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b"
Jan 20 17:52:53 crc kubenswrapper[4995]: E0120 17:52:53.599774 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b\": container with ID starting with fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b not found: ID does not exist" containerID="fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.599901 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b"} err="failed to get container status \"fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b\": rpc error: code = NotFound desc = could not find container \"fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b\": container with ID starting with fee4abda44b395df7b66980815d6878a61a3a9ac6df0133240ec845501d2948b not found: ID does not exist"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.600017 4995 scope.go:117] "RemoveContainer" containerID="b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f"
Jan 20 17:52:53 crc kubenswrapper[4995]: E0120 17:52:53.600461 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f\": container with ID starting with b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f not found: ID does not exist" containerID="b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f"
Jan 20 17:52:53 crc kubenswrapper[4995]: I0120 17:52:53.600556 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f"} err="failed to get container status \"b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f\": rpc error: code = NotFound desc = could not find container \"b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f\": container with ID starting with b21c8b11034c06148251445d4e96bf0abfdcb425f81f2a34fc42268c27c6ec1f not found: ID does not exist"
Jan 20 17:52:54 crc kubenswrapper[4995]: I0120 17:52:54.001286 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" path="/var/lib/kubelet/pods/27d4dd2c-e607-46b9-a297-f29ed44caf4f/volumes"
Jan 20 17:53:30 crc kubenswrapper[4995]: I0120 17:53:30.571568 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:53:30 crc kubenswrapper[4995]: I0120 17:53:30.572245 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:54:00 crc kubenswrapper[4995]: I0120 17:54:00.571373 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:54:00 crc kubenswrapper[4995]: I0120 17:54:00.572041 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:54:30 crc kubenswrapper[4995]: I0120 17:54:30.571355 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 17:54:30 crc kubenswrapper[4995]: I0120 17:54:30.572064 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 17:54:30 crc kubenswrapper[4995]: I0120 17:54:30.572154 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 17:54:30 crc kubenswrapper[4995]: I0120 17:54:30.573072 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 17:54:30 crc kubenswrapper[4995]: I0120 17:54:30.573194 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" gracePeriod=600
Jan 20 17:54:30 crc kubenswrapper[4995]: E0120 17:54:30.727671 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:54:31 crc kubenswrapper[4995]: I0120 17:54:31.597795 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" exitCode=0
Jan 20 17:54:31 crc kubenswrapper[4995]: I0120 17:54:31.597982 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"}
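The machine-config-daemon liveness probe has been refused every 30s since 17:53:30; the kill at 17:54:30 after the third consecutive failure is consistent with the default failureThreshold of 3. The immediate "back-off 5m0s" then means this container has already crashed enough times that its restart delay sits at the kubelet's cap: the delay roughly doubles per crash (10s, 20s, 40s, ... capped at 5m), and the later "Error syncing pod, skipping" lines at 17:54:43, 17:54:58 and 17:55:10 are periodic sync attempts bouncing off that timer, not additional restarts. An illustrative sketch of the capped doubling, assuming the kubelet's default base and cap (this is not kubelet source):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const base = 10 * time.Second    // assumed initial delay after the first crash
    	const maxDelay = 5 * time.Minute // "back-off 5m0s" means this cap was reached

    	delay := base
    	for crash := 1; crash <= 7; crash++ {
    		fmt.Printf("crash %d: next restart delayed %v\n", crash, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }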
Jan 20 17:54:31 crc kubenswrapper[4995]: I0120 17:54:31.598353 4995 scope.go:117] "RemoveContainer" containerID="e00a298fac5aa085cd546b6a0e6d19c6993a6759286f5bb3971e40447f12970f"
Jan 20 17:54:31 crc kubenswrapper[4995]: I0120 17:54:31.599730 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:54:31 crc kubenswrapper[4995]: E0120 17:54:31.600271 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:54:43 crc kubenswrapper[4995]: I0120 17:54:43.990177 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:54:43 crc kubenswrapper[4995]: E0120 17:54:43.991068 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:54:52 crc kubenswrapper[4995]: I0120 17:54:52.843137 4995 generic.go:334] "Generic (PLEG): container finished" podID="356ca6c0-8604-40b3-b965-af9225ea185f" containerID="64a19a3fdb566ad3aa799b7509530c884fe8a53b2a5309785a02e13e44cdeb25" exitCode=0
Jan 20 17:54:52 crc kubenswrapper[4995]: I0120 17:54:52.843418 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"356ca6c0-8604-40b3-b965-af9225ea185f","Type":"ContainerDied","Data":"64a19a3fdb566ad3aa799b7509530c884fe8a53b2a5309785a02e13e44cdeb25"}
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.061847 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215148 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-temporary\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215263 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-config-data\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215294 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ca-certs\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215405 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qz95\" (UniqueName: \"kubernetes.io/projected/356ca6c0-8604-40b3-b965-af9225ea185f-kube-api-access-4qz95\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215442 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215480 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config-secret\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215534 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-workdir\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215566 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ssh-key\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215599 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"356ca6c0-8604-40b3-b965-af9225ea185f\" (UID: \"356ca6c0-8604-40b3-b965-af9225ea185f\") "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.215942 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.216177 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-config-data" (OuterVolumeSpecName: "config-data") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.221271 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "test-operator-logs") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.221668 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/356ca6c0-8604-40b3-b965-af9225ea185f-kube-api-access-4qz95" (OuterVolumeSpecName: "kube-api-access-4qz95") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "kube-api-access-4qz95". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.284608 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.318658 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.318698 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qz95\" (UniqueName: \"kubernetes.io/projected/356ca6c0-8604-40b3-b965-af9225ea185f-kube-api-access-4qz95\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.318715 4995 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.318753 4995 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" "
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.318767 4995 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/356ca6c0-8604-40b3-b965-af9225ea185f-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.333533 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.333628 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.334935 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.355350 4995 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.359396 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "356ca6c0-8604-40b3-b965-af9225ea185f" (UID: "356ca6c0-8604-40b3-b965-af9225ea185f"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.420678 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.420735 4995 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.420753 4995 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ssh-key\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.420767 4995 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.420779 4995 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/356ca6c0-8604-40b3-b965-af9225ea185f-ca-certs\") on node \"crc\" DevicePath \"\""
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.876631 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"356ca6c0-8604-40b3-b965-af9225ea185f","Type":"ContainerDied","Data":"26e68232d06f08bf7613e032f7be9ff0c9d70928c357fe98f9009b2f14d731f0"}
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.876936 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26e68232d06f08bf7613e032f7be9ff0c9d70928c357fe98f9009b2f14d731f0"
Jan 20 17:54:55 crc kubenswrapper[4995]: I0120 17:54:55.876697 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 20 17:54:58 crc kubenswrapper[4995]: I0120 17:54:58.991041 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:54:58 crc kubenswrapper[4995]: E0120 17:54:58.991673 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.222506 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223296 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="registry-server"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223310 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="registry-server"
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223323 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="extract-utilities"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223330 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="extract-utilities"
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223353 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="extract-content"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223361 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="extract-content"
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223372 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="registry-server"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223377 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="registry-server"
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223394 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="356ca6c0-8604-40b3-b965-af9225ea185f" containerName="tempest-tests-tempest-tests-runner"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223400 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="356ca6c0-8604-40b3-b965-af9225ea185f" containerName="tempest-tests-tempest-tests-runner"
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223409 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="extract-utilities"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223414 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="extract-utilities"
Jan 20 17:55:07 crc kubenswrapper[4995]: E0120 17:55:07.223425 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="extract-content"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223430 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="extract-content"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223594 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="27d4dd2c-e607-46b9-a297-f29ed44caf4f" containerName="registry-server"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223614 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d50abdf3-9c50-49ea-b698-4cc83fdd524e" containerName="registry-server"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.223627 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="356ca6c0-8604-40b3-b965-af9225ea185f" containerName="tempest-tests-tempest-tests-runner"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.224218 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.226779 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-8fpfv"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.245463 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.380454 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pcm8\" (UniqueName: \"kubernetes.io/projected/d6171ebe-412f-4cf2-839e-785eeeaf714b-kube-api-access-4pcm8\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.380660 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.483353 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.483464 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pcm8\" (UniqueName: \"kubernetes.io/projected/d6171ebe-412f-4cf2-839e-785eeeaf714b-kube-api-access-4pcm8\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:07 crc kubenswrapper[4995]: I0120 17:55:07.483860 4995 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:08 crc kubenswrapper[4995]: I0120 17:55:08.133273 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pcm8\" (UniqueName: \"kubernetes.io/projected/d6171ebe-412f-4cf2-839e-785eeeaf714b-kube-api-access-4pcm8\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:08 crc kubenswrapper[4995]: I0120 17:55:08.271199 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"d6171ebe-412f-4cf2-839e-785eeeaf714b\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:08 crc kubenswrapper[4995]: I0120 17:55:08.446782 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 17:55:08 crc kubenswrapper[4995]: I0120 17:55:08.936282 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 20 17:55:09 crc kubenswrapper[4995]: I0120 17:55:09.034520 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"d6171ebe-412f-4cf2-839e-785eeeaf714b","Type":"ContainerStarted","Data":"2a4abd1dd6765e0318a01467f6197db0cb1fa057d7ce8a5a0acbc42607250ca3"}
Jan 20 17:55:10 crc kubenswrapper[4995]: I0120 17:55:10.990660 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:55:10 crc kubenswrapper[4995]: E0120 17:55:10.991979 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:55:11 crc kubenswrapper[4995]: I0120 17:55:11.060727 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"d6171ebe-412f-4cf2-839e-785eeeaf714b","Type":"ContainerStarted","Data":"5c4631722f7560f3a14cc5a5eaaff571a36b0f5b78e1579ac0c388ab5d87b46d"}
Jan 20 17:55:11 crc kubenswrapper[4995]: I0120 17:55:11.085121 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=3.120284782 podStartE2EDuration="4.085063826s" podCreationTimestamp="2026-01-20 17:55:07 +0000 UTC" firstStartedPulling="2026-01-20 17:55:08.947818996 +0000 UTC m=+5027.192423812" lastFinishedPulling="2026-01-20 17:55:09.91259805 +0000 UTC m=+5028.157202856" observedRunningTime="2026-01-20 17:55:11.08338381 +0000 UTC m=+5029.327988626" watchObservedRunningTime="2026-01-20 17:55:11.085063826 +0000 UTC m=+5029.329668642"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.003613 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4fcqp"]
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.006513 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4fcqp"]
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.006643 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.119557 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075a85ee-fd7b-44e9-a631-840b4fcc96fb-catalog-content\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.119726 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdm2n\" (UniqueName: \"kubernetes.io/projected/075a85ee-fd7b-44e9-a631-840b4fcc96fb-kube-api-access-mdm2n\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.119767 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075a85ee-fd7b-44e9-a631-840b4fcc96fb-utilities\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.221705 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdm2n\" (UniqueName: \"kubernetes.io/projected/075a85ee-fd7b-44e9-a631-840b4fcc96fb-kube-api-access-mdm2n\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.221775 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075a85ee-fd7b-44e9-a631-840b4fcc96fb-utilities\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.221892 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075a85ee-fd7b-44e9-a631-840b4fcc96fb-catalog-content\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.222464 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/075a85ee-fd7b-44e9-a631-840b4fcc96fb-utilities\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.222495 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/075a85ee-fd7b-44e9-a631-840b4fcc96fb-catalog-content\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.337989 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdm2n\" (UniqueName: \"kubernetes.io/projected/075a85ee-fd7b-44e9-a631-840b4fcc96fb-kube-api-access-mdm2n\") pod \"certified-operators-4fcqp\" (UID: \"075a85ee-fd7b-44e9-a631-840b4fcc96fb\") " pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.351805 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:18 crc kubenswrapper[4995]: I0120 17:55:18.897200 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4fcqp"]
Jan 20 17:55:19 crc kubenswrapper[4995]: I0120 17:55:19.136508 4995 generic.go:334] "Generic (PLEG): container finished" podID="075a85ee-fd7b-44e9-a631-840b4fcc96fb" containerID="b01ea28b1628f9d92e676331efffaa21f5188627beaa0071f82c1a81d17da81d" exitCode=0
Jan 20 17:55:19 crc kubenswrapper[4995]: I0120 17:55:19.136705 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fcqp" event={"ID":"075a85ee-fd7b-44e9-a631-840b4fcc96fb","Type":"ContainerDied","Data":"b01ea28b1628f9d92e676331efffaa21f5188627beaa0071f82c1a81d17da81d"}
Jan 20 17:55:19 crc kubenswrapper[4995]: I0120 17:55:19.136849 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fcqp" event={"ID":"075a85ee-fd7b-44e9-a631-840b4fcc96fb","Type":"ContainerStarted","Data":"c6ad5cdd2bf4fa242fe8714b14f2aa76d45fdc5c139de4b676314bf56dc45b90"}
Jan 20 17:55:22 crc kubenswrapper[4995]: I0120 17:55:22.989309 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:55:22 crc kubenswrapper[4995]: E0120 17:55:22.990016 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:55:24 crc kubenswrapper[4995]: I0120 17:55:24.195604 4995 generic.go:334] "Generic (PLEG): container finished" podID="075a85ee-fd7b-44e9-a631-840b4fcc96fb" containerID="ca6ab7d72a881199b1eac947d8f5467045f98476b38ce82dfa9acd97a6c7e82b" exitCode=0
Jan 20 17:55:24 crc kubenswrapper[4995]: I0120 17:55:24.195697 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fcqp" event={"ID":"075a85ee-fd7b-44e9-a631-840b4fcc96fb","Type":"ContainerDied","Data":"ca6ab7d72a881199b1eac947d8f5467045f98476b38ce82dfa9acd97a6c7e82b"}
Jan 20 17:55:25 crc kubenswrapper[4995]: I0120 17:55:25.207416 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4fcqp" event={"ID":"075a85ee-fd7b-44e9-a631-840b4fcc96fb","Type":"ContainerStarted","Data":"2f8426a01ed08ee47d4a78036d2f5c3f05652eb0a03d712f23ea51c99e80e681"}
Jan 20 17:55:25 crc kubenswrapper[4995]: I0120 17:55:25.233341 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4fcqp" podStartSLOduration=2.694614089 podStartE2EDuration="8.233318127s" podCreationTimestamp="2026-01-20 17:55:17 +0000 UTC" firstStartedPulling="2026-01-20 17:55:19.138516655 +0000 UTC m=+5037.383121461" lastFinishedPulling="2026-01-20 17:55:24.677220693 +0000 UTC m=+5042.921825499" observedRunningTime="2026-01-20 17:55:25.229231736 +0000 UTC m=+5043.473836542" watchObservedRunningTime="2026-01-20 17:55:25.233318127 +0000 UTC m=+5043.477922953"
Jan 20 17:55:28 crc kubenswrapper[4995]: I0120 17:55:28.352902 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:28 crc kubenswrapper[4995]: I0120 17:55:28.354214 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:28 crc kubenswrapper[4995]: I0120 17:55:28.408895 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:29 crc kubenswrapper[4995]: I0120 17:55:29.573837 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4fcqp"
Jan 20 17:55:29 crc kubenswrapper[4995]: I0120 17:55:29.638283 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4fcqp"]
Jan 20 17:55:29 crc kubenswrapper[4995]: I0120 17:55:29.729298 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4w5vv"]
Jan 20 17:55:29 crc kubenswrapper[4995]: I0120 17:55:29.729542 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4w5vv" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="registry-server" containerID="cri-o://50b909571ee719e4856ddd25a2b7ef9226da80ad57513600916f0e4b6f4228dc" gracePeriod=2
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.254309 4995 generic.go:334] "Generic (PLEG): container finished" podID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerID="50b909571ee719e4856ddd25a2b7ef9226da80ad57513600916f0e4b6f4228dc" exitCode=0
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.254383 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4w5vv" event={"ID":"92414b60-e9e0-45ef-91ab-8ce0734f081b","Type":"ContainerDied","Data":"50b909571ee719e4856ddd25a2b7ef9226da80ad57513600916f0e4b6f4228dc"}
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.721378 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4w5vv"
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.793903 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-catalog-content\") pod \"92414b60-e9e0-45ef-91ab-8ce0734f081b\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") "
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.794337 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-utilities\") pod \"92414b60-e9e0-45ef-91ab-8ce0734f081b\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") "
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.794469 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9n9k\" (UniqueName: \"kubernetes.io/projected/92414b60-e9e0-45ef-91ab-8ce0734f081b-kube-api-access-g9n9k\") pod \"92414b60-e9e0-45ef-91ab-8ce0734f081b\" (UID: \"92414b60-e9e0-45ef-91ab-8ce0734f081b\") "
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.796314 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-utilities" (OuterVolumeSpecName: "utilities") pod "92414b60-e9e0-45ef-91ab-8ce0734f081b" (UID: "92414b60-e9e0-45ef-91ab-8ce0734f081b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.801725 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92414b60-e9e0-45ef-91ab-8ce0734f081b-kube-api-access-g9n9k" (OuterVolumeSpecName: "kube-api-access-g9n9k") pod "92414b60-e9e0-45ef-91ab-8ce0734f081b" (UID: "92414b60-e9e0-45ef-91ab-8ce0734f081b"). InnerVolumeSpecName "kube-api-access-g9n9k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.868071 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92414b60-e9e0-45ef-91ab-8ce0734f081b" (UID: "92414b60-e9e0-45ef-91ab-8ce0734f081b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.896761 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.896800 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9n9k\" (UniqueName: \"kubernetes.io/projected/92414b60-e9e0-45ef-91ab-8ce0734f081b-kube-api-access-g9n9k\") on node \"crc\" DevicePath \"\""
Jan 20 17:55:30 crc kubenswrapper[4995]: I0120 17:55:30.896814 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92414b60-e9e0-45ef-91ab-8ce0734f081b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.264276 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4w5vv" event={"ID":"92414b60-e9e0-45ef-91ab-8ce0734f081b","Type":"ContainerDied","Data":"30fcd77cc111fd01f44c34e76f725d8a0ede1c9396ce2b041be0e5f6689661e7"}
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.264318 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4w5vv"
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.264351 4995 scope.go:117] "RemoveContainer" containerID="50b909571ee719e4856ddd25a2b7ef9226da80ad57513600916f0e4b6f4228dc"
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.289752 4995 scope.go:117] "RemoveContainer" containerID="bc932303901d904876a571d44475672fae0db58106b30fa7eceb4848a91e7174"
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.305842 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4w5vv"]
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.318675 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4w5vv"]
Jan 20 17:55:31 crc kubenswrapper[4995]: I0120 17:55:31.353959 4995 scope.go:117] "RemoveContainer" containerID="65ab553209b4a826764656ac59331d1c228979e6f1f61bdd2e476e5e7342e4e4"
Jan 20 17:55:32 crc kubenswrapper[4995]: I0120 17:55:31.999783 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" path="/var/lib/kubelet/pods/92414b60-e9e0-45ef-91ab-8ce0734f081b/volumes"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.926317 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rvzk2/must-gather-l4p9s"]
Jan 20 17:55:34 crc kubenswrapper[4995]: E0120 17:55:34.926922 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="extract-utilities"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.926934 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="extract-utilities"
Jan 20 17:55:34 crc kubenswrapper[4995]: E0120 17:55:34.926952 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="extract-content"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.926957 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="extract-content"
Jan 20 17:55:34 crc kubenswrapper[4995]: E0120 17:55:34.926977 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="registry-server"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.926983 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="registry-server"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.927168 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="92414b60-e9e0-45ef-91ab-8ce0734f081b" containerName="registry-server"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.928095 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.932177 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-rvzk2"/"default-dockercfg-t8xfm"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.932368 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rvzk2"/"kube-root-ca.crt"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.934160 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-rvzk2"/"openshift-service-ca.crt"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.940764 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rvzk2/must-gather-l4p9s"]
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.983774 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b959c436-4766-477b-b24b-833813bb88cd-must-gather-output\") pod \"must-gather-l4p9s\" (UID: \"b959c436-4766-477b-b24b-833813bb88cd\") " pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:34 crc kubenswrapper[4995]: I0120 17:55:34.984282 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8gpp\" (UniqueName: \"kubernetes.io/projected/b959c436-4766-477b-b24b-833813bb88cd-kube-api-access-s8gpp\") pod \"must-gather-l4p9s\" (UID: \"b959c436-4766-477b-b24b-833813bb88cd\") " pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:35 crc kubenswrapper[4995]: I0120 17:55:35.085939 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b959c436-4766-477b-b24b-833813bb88cd-must-gather-output\") pod \"must-gather-l4p9s\" (UID: \"b959c436-4766-477b-b24b-833813bb88cd\") " pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:35 crc kubenswrapper[4995]: I0120 17:55:35.086089 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8gpp\" (UniqueName: \"kubernetes.io/projected/b959c436-4766-477b-b24b-833813bb88cd-kube-api-access-s8gpp\") pod \"must-gather-l4p9s\" (UID: \"b959c436-4766-477b-b24b-833813bb88cd\") " pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:35 crc kubenswrapper[4995]: I0120 17:55:35.086698 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b959c436-4766-477b-b24b-833813bb88cd-must-gather-output\") pod \"must-gather-l4p9s\" (UID: \"b959c436-4766-477b-b24b-833813bb88cd\") " pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:35 crc kubenswrapper[4995]: I0120 17:55:35.111308 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8gpp\" (UniqueName: \"kubernetes.io/projected/b959c436-4766-477b-b24b-833813bb88cd-kube-api-access-s8gpp\") pod \"must-gather-l4p9s\" (UID: \"b959c436-4766-477b-b24b-833813bb88cd\") " pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:35 crc kubenswrapper[4995]: I0120 17:55:35.244966 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/must-gather-l4p9s"
Jan 20 17:55:35 crc kubenswrapper[4995]: W0120 17:55:35.926436 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb959c436_4766_477b_b24b_833813bb88cd.slice/crio-29a8a676498e31b5217b11c56373ef319586f3ca29cd21d9795dd8959bfcd9e1 WatchSource:0}: Error finding container 29a8a676498e31b5217b11c56373ef319586f3ca29cd21d9795dd8959bfcd9e1: Status 404 returned error can't find the container with id 29a8a676498e31b5217b11c56373ef319586f3ca29cd21d9795dd8959bfcd9e1
Jan 20 17:55:35 crc kubenswrapper[4995]: I0120 17:55:35.931817 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-rvzk2/must-gather-l4p9s"]
Jan 20 17:55:36 crc kubenswrapper[4995]: I0120 17:55:36.336395 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/must-gather-l4p9s" event={"ID":"b959c436-4766-477b-b24b-833813bb88cd","Type":"ContainerStarted","Data":"29a8a676498e31b5217b11c56373ef319586f3ca29cd21d9795dd8959bfcd9e1"}
Jan 20 17:55:36 crc kubenswrapper[4995]: I0120 17:55:36.990233 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:55:36 crc kubenswrapper[4995]: E0120 17:55:36.990483 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:55:41 crc kubenswrapper[4995]: I0120 17:55:41.397977 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/must-gather-l4p9s" event={"ID":"b959c436-4766-477b-b24b-833813bb88cd","Type":"ContainerStarted","Data":"baabfdb2ff7b0f7191d3c103d570ce36155395936ab44fcafb184851fa198aab"}
Jan 20 17:55:41 crc kubenswrapper[4995]: I0120 17:55:41.398701 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/must-gather-l4p9s" event={"ID":"b959c436-4766-477b-b24b-833813bb88cd","Type":"ContainerStarted","Data":"3808521a881df88024b7462e559c21b441e0766ef538c969237785fcfd1bbf1a"}
Jan 20 17:55:41 crc kubenswrapper[4995]: I0120 17:55:41.422657 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rvzk2/must-gather-l4p9s" podStartSLOduration=3.020780387 podStartE2EDuration="7.422630835s" podCreationTimestamp="2026-01-20 17:55:34 +0000 UTC" firstStartedPulling="2026-01-20 17:55:35.929055714 +0000 UTC m=+5054.173660520" lastFinishedPulling="2026-01-20 17:55:40.330906162 +0000 UTC m=+5058.575510968" observedRunningTime="2026-01-20 17:55:41.420573189 +0000 UTC m=+5059.665178035" watchObservedRunningTime="2026-01-20 17:55:41.422630835 +0000 UTC m=+5059.667235671"
Jan 20 17:55:44 crc kubenswrapper[4995]: I0120 17:55:44.920925 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-4pkqc"]
Jan 20 17:55:44 crc kubenswrapper[4995]: I0120 17:55:44.923808 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.025720 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5d9cfc6-6ade-44d1-b909-8f278727027f-host\") pod \"crc-debug-4pkqc\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.025845 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vql66\" (UniqueName: \"kubernetes.io/projected/f5d9cfc6-6ade-44d1-b909-8f278727027f-kube-api-access-vql66\") pod \"crc-debug-4pkqc\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.128128 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vql66\" (UniqueName: \"kubernetes.io/projected/f5d9cfc6-6ade-44d1-b909-8f278727027f-kube-api-access-vql66\") pod \"crc-debug-4pkqc\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.128273 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5d9cfc6-6ade-44d1-b909-8f278727027f-host\") pod \"crc-debug-4pkqc\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.128895 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5d9cfc6-6ade-44d1-b909-8f278727027f-host\") pod \"crc-debug-4pkqc\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.155815 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vql66\" (UniqueName: \"kubernetes.io/projected/f5d9cfc6-6ade-44d1-b909-8f278727027f-kube-api-access-vql66\") pod \"crc-debug-4pkqc\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.241529 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc"
Jan 20 17:55:45 crc kubenswrapper[4995]: I0120 17:55:45.448176 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc" event={"ID":"f5d9cfc6-6ade-44d1-b909-8f278727027f","Type":"ContainerStarted","Data":"91dc3a0fea9dda2b6d881c3620d7a8b88c84c50b89b297c3fcc70a933316765d"}
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.769054 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-674779b598-44vdg_87360161-2c16-453b-bfeb-649cd107fdf0/barbican-api-log/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.791108 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-674779b598-44vdg_87360161-2c16-453b-bfeb-649cd107fdf0/barbican-api/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.848267 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-76684d5cb6-ln2nh_82750a54-2446-49e7-8251-7ae6f228dc49/barbican-keystone-listener-log/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.856916 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-76684d5cb6-ln2nh_82750a54-2446-49e7-8251-7ae6f228dc49/barbican-keystone-listener/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.874304 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b68c6cc67-mvcbt_2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5/barbican-worker-log/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.878781 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b68c6cc67-mvcbt_2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5/barbican-worker/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.933966 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-g484b_7426fa32-40ee-4b5e-9d5a-962505929c91/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:47 crc kubenswrapper[4995]: I0120 17:55:47.986735 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/ceilometer-central-agent/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.014093 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/ceilometer-notification-agent/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.020700 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/sg-core/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.043850 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/proxy-httpd/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.063502 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_59abbdb2-429a-473e-ae6b-8f731b6cf17d/cinder-api-log/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.167802 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_59abbdb2-429a-473e-ae6b-8f731b6cf17d/cinder-api/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.210941 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_600e23cc-0af2-4f67-a17b-a69f4753f7f5/cinder-scheduler/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.295248 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_600e23cc-0af2-4f67-a17b-a69f4753f7f5/probe/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.336098 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-t744h_6d2d90c1-a32f-4ec1-82e9-4d4440542e43/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.359039 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-j47qj_b46e1f63-68f8-4cb0-835d-5d35ece39037/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.524589 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-b2hmj_4d44782e-c760-4297-8d8b-8e87526ffbdb/dnsmasq-dns/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.530584 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-b2hmj_4d44782e-c760-4297-8d8b-8e87526ffbdb/init/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.564547 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq_d73cf0b7-6fb8-4b4c-b6bd-acb174f44890/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.580000 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4a7fd66d-0211-429d-8dfa-7a29ca98ab51/glance-log/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.610860 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4a7fd66d-0211-429d-8dfa-7a29ca98ab51/glance-httpd/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.625521 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b40b3bf2-fecd-4b7b-8110-7f15651792f3/glance-log/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.649029 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b40b3bf2-fecd-4b7b-8110-7f15651792f3/glance-httpd/0.log"
Jan 20 17:55:48 crc kubenswrapper[4995]: I0120 17:55:48.989540 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:55:48 crc kubenswrapper[4995]: E0120 17:55:48.990918 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.248246 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cd588cc5b-pmhlg_83a7df1c-c59a-4a4c-b34d-df9fc6711aea/horizon-log/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.371249 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cd588cc5b-pmhlg_83a7df1c-c59a-4a4c-b34d-df9fc6711aea/horizon/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.401687 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z_f8079eec-4ec7-4979-8cd9-531c61418782/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.431587 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-rnrwr_f6888ab8-4be9-45c2-b50d-46927fd64cba/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.883299 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7f99b88f98-w6ztm_d2343e74-3182-46e7-b4d2-7d9c35964fab/keystone-api/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.918795 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29482141-gpst2_f2050c7c-ffc8-4deb-89d8-f6cc0ee15601/keystone-cron/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.933324 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_0e5570d0-a1c7-46f5-a5f6-529ad06cf05f/kube-state-metrics/0.log"
Jan 20 17:55:49 crc kubenswrapper[4995]: I0120 17:55:49.987648 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz_34c57bed-2d89-4ce3-9613-eb3ec4fb222b/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:55:58 crc kubenswrapper[4995]: I0120 17:55:58.568691 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc" event={"ID":"f5d9cfc6-6ade-44d1-b909-8f278727027f","Type":"ContainerStarted","Data":"6a1c7d767c782a0f2feaec25f9a7b96c09954baddcb0d1e1f790b0a43078f9f6"}
Jan 20 17:55:58 crc kubenswrapper[4995]: I0120 17:55:58.579915 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc" podStartSLOduration=2.411269712 podStartE2EDuration="14.579894915s" podCreationTimestamp="2026-01-20 17:55:44 +0000 UTC" firstStartedPulling="2026-01-20 17:55:45.283174661 +0000 UTC m=+5063.527779467" lastFinishedPulling="2026-01-20 17:55:57.451799864 +0000 UTC m=+5075.696404670" observedRunningTime="2026-01-20 17:55:58.579193095 +0000 UTC m=+5076.823797901" watchObservedRunningTime="2026-01-20 17:55:58.579894915 +0000 UTC m=+5076.824499721"
Jan 20 17:55:59 crc kubenswrapper[4995]: I0120 17:55:59.681486 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_f951f50c-486d-4038-a43a-4d40fa1812de/memcached/0.log"
Jan 20 17:55:59 crc kubenswrapper[4995]: I0120 17:55:59.873364 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7857b9874f-85h9n_121cce9d-e190-44bf-b332-7b268c2ffd26/neutron-api/0.log"
Jan 20 17:55:59 crc kubenswrapper[4995]: I0120 17:55:59.961162 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7857b9874f-85h9n_121cce9d-e190-44bf-b332-7b268c2ffd26/neutron-httpd/0.log"
Jan 20 17:55:59 crc kubenswrapper[4995]: I0120 17:55:59.987433 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx_1fd4a738-bf40-4e76-9ee2-79a8042e7c07/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:00 crc kubenswrapper[4995]: I0120 17:56:00.587009 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d6dd58d8-2dec-4e9a-b9b7-78f585378448/nova-api-log/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.323198 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d6dd58d8-2dec-4e9a-b9b7-78f585378448/nova-api-api/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.465105 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_18edb0dd-f0be-4f0e-b860-cf6cc5b67745/nova-cell0-conductor-conductor/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.556020 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_68ec0231-a7a1-45be-afbf-e66cd2a68d38/nova-cell1-conductor-conductor/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.670769 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_2a0df207-0ec6-420e-9f84-7ea1d4e6b469/nova-cell1-novncproxy-novncproxy/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.730865 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-f6vqc_a463f304-1432-497f-9f19-3cd3b4d05da2/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.808275 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4a499d87-fe94-4606-85e0-a225b12773f7/nova-metadata-log/0.log"
Jan 20 17:56:01 crc kubenswrapper[4995]: I0120 17:56:01.998384 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:56:01 crc kubenswrapper[4995]: E0120 17:56:01.998710 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.203642 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4a499d87-fe94-4606-85e0-a225b12773f7/nova-metadata-metadata/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.377363 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_b776e369-c664-4e5e-a256-b5a1725c0142/nova-scheduler-scheduler/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.402270 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a68274bb-aba1-4c92-85ae-8e043d5ac325/galera/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.412885 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a68274bb-aba1-4c92-85ae-8e043d5ac325/mysql-bootstrap/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.438538 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6/galera/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.448545 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6/mysql-bootstrap/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.455625 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_82097345-279c-4f86-ad0d-29cd82acf859/openstackclient/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.470684 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-vfjkw_5e655fe2-263f-4d77-b9fd-af0528012527/openstack-network-exporter/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.487487 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q9nkf_028bd686-8a70-4866-968f-c29ab470e44c/ovsdb-server/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.499738 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q9nkf_028bd686-8a70-4866-968f-c29ab470e44c/ovs-vswitchd/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.505945 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q9nkf_028bd686-8a70-4866-968f-c29ab470e44c/ovsdb-server-init/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.523000 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-spc7x_54be3683-2d75-43fd-8301-e05b2a5103cc/ovn-controller/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.554162 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-nwtzl_2c510d64-d6b7-41c0-a293-4528282415ec/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.568373 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c79dd22d-f0b7-4102-a740-1e5c88a5a548/ovn-northd/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.575805 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c79dd22d-f0b7-4102-a740-1e5c88a5a548/openstack-network-exporter/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.589806 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_929cb9df-f5d9-4b0b-972c-5b79b6e28ab8/ovsdbserver-nb/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.597571 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_929cb9df-f5d9-4b0b-972c-5b79b6e28ab8/openstack-network-exporter/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.614335 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9283bc9e-66ee-4ded-b64e-3bdca7f112b4/ovsdbserver-sb/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.621663 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9283bc9e-66ee-4ded-b64e-3bdca7f112b4/openstack-network-exporter/0.log"
Jan 20 17:56:03 crc kubenswrapper[4995]: I0120 17:56:03.796798 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-679d748c9b-mrbbx_ff10efe7-680b-4d4a-a950-e2a7dfbd24a1/placement-log/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.087056 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-679d748c9b-mrbbx_ff10efe7-680b-4d4a-a950-e2a7dfbd24a1/placement-api/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.107458 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/prometheus/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.111867 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/config-reloader/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.117466 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/thanos-sidecar/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.124496 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/init-config-reloader/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.153400 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a10b59cc-41b2-49f9-ba12-2bdb82b568f7/rabbitmq/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.159369 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a10b59cc-41b2-49f9-ba12-2bdb82b568f7/setup-container/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.193813 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cfa14e5d-418a-4eed-96fe-fef4b2a88543/rabbitmq/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.217757 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cfa14e5d-418a-4eed-96fe-fef4b2a88543/setup-container/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.236003 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7_152fb470-c7e5-4e8f-86b4-5e816d021183/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.252634 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-tf4fv_7e4edc52-6ba0-441c-abeb-a7f17b0cb31f/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.277643 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz_0b6494de-6466-4ecf-99d4-e410e3829130/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.312552 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-46zxc_bcd29c3f-dbeb-439f-98f8-7d4aa39597d4/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.329549 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-kr7jc_341c1f6f-c1d4-49a7-8980-7a6f9df0c216/ssh-known-hosts-edpm-deployment/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.464969 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f5d884999-jxjqt_6da8401d-a15a-4ff6-ab0f-11cbafff0855/proxy-httpd/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.478685 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f5d884999-jxjqt_6da8401d-a15a-4ff6-ab0f-11cbafff0855/proxy-server/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.485910 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-nmjp6_f955d94a-612b-4962-9745-ac012f2398b2/swift-ring-rebalance/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.515919 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-server/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.539774 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-replicator/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.544746 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-auditor/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.550983 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-reaper/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.556978 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-server/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.600284 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-replicator/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.607261 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-auditor/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.618356 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-updater/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.627181 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-server/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.651438 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-replicator/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.670988 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-auditor/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.678813 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-updater/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.689692 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-expirer/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.695095 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/rsync/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.703915 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/swift-recon-cron/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.768212 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7_896e00af-dc03-4ed9-b3e7-314eaf50d3b9/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.792925 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_356ca6c0-8604-40b3-b965-af9225ea185f/tempest-tests-tempest-tests-runner/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.799197 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_d6171ebe-412f-4cf2-839e-785eeeaf714b/test-operator-logs-container/0.log"
Jan 20 17:56:04 crc kubenswrapper[4995]: I0120 17:56:04.815041 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5_78a82208-d087-4194-ab1e-c3df98c3321e/validate-network-edpm-deployment-openstack-edpm-ipam/0.log"
Jan 20 17:56:05 crc kubenswrapper[4995]: I0120 17:56:05.336061 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_e797cd4f-fdf8-485b-94e6-2a1105dedb71/watcher-api-log/0.log"
Jan 20 17:56:07 crc kubenswrapper[4995]: I0120 17:56:07.878347 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_e797cd4f-fdf8-485b-94e6-2a1105dedb71/watcher-api/0.log"
Jan 20 17:56:08 crc kubenswrapper[4995]: I0120 17:56:08.075023 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_fe959b99-aa94-41d7-aefa-6e6803a337cf/watcher-applier/0.log"
Jan 20 17:56:08 crc kubenswrapper[4995]: I0120 17:56:08.661645 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_f3461cc0-9ae2-4e3b-a0ba-070e6273cba0/watcher-decision-engine/0.log"
Jan 20 17:56:12 crc kubenswrapper[4995]: I0120 17:56:12.054603 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/controller/0.log"
Jan 20 17:56:12 crc kubenswrapper[4995]: I0120 17:56:12.060961 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/kube-rbac-proxy/0.log"
Jan 20 17:56:12 crc kubenswrapper[4995]: I0120 17:56:12.081609 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/controller/0.log"
Jan 20 17:56:12 crc kubenswrapper[4995]: I0120 17:56:12.990366 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:56:12 crc kubenswrapper[4995]: E0120 17:56:12.990795 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.601130 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.610264 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/reloader/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.615239 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr-metrics/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.627891 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.637815 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy-frr/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.645936 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-frr-files/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.653678 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-reloader/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.660928 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-metrics/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.675490 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-zwjrk_473c4019-d6be-4420-a678-d18999ddbe1c/frr-k8s-webhook-server/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.705431 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6dd7779458-w2rt4_8b17b582-a06b-4ece-b513-7f826c838f6f/manager/0.log"
Jan 20 17:56:13 crc kubenswrapper[4995]: I0120 17:56:13.719797 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5c6d4b5599-f8tsv_7fae7627-5782-4525-ba17-4507d15764cd/webhook-server/0.log"
Jan 20 17:56:14 crc kubenswrapper[4995]: I0120 17:56:14.163069 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/speaker/0.log"
Jan 20 17:56:14 crc kubenswrapper[4995]: I0120 17:56:14.169221 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/kube-rbac-proxy/0.log"
Jan 20 17:56:23 crc kubenswrapper[4995]: I0120 17:56:23.989763 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a"
Jan 20 17:56:23 crc kubenswrapper[4995]: E0120 17:56:23.990456 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.651796 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-7zdch_c0a3e997-8709-444b-ae4e-8fc34b04cb6e/manager/0.log"
Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.696357 4995
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-zgvcz_3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a/manager/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.707694 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mj76w_9f302bf3-1501-44cc-924c-2e5c42c0eb58/manager/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.723899 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/extract/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.732721 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/util/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.749845 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/pull/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.828751 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-bxm9j_072647c8-2d0e-4716-bb29-a87e3ff5cd29/manager/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.838271 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-wm2kb_49392c07-237b-447e-a126-f06e1cbf32a2/manager/0.log" Jan 20 17:56:35 crc kubenswrapper[4995]: I0120 17:56:35.875833 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-zs4nf_c8061771-759d-49d5-b88b-9d66f45277ac/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.148312 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-zd75z_439ab902-28ff-48a4-81e4-93c72937e573/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.162952 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-7p5v4_f4577775-2c19-495a-95e7-1638f359b533/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.228862 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-dwn52_a3c2211e-845d-47cc-b4a5-962340b0d53c/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.241019 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-wjrpf_93ac6eeb-0456-4cfe-8298-b8b97d09716c/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.280431 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-qd2nk_47ec26a3-41ca-482f-b539-c9dc32af0bb0/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.326754 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-m7p7b_86d4f806-c5e4-4ce0-a859-5e104b0d5dce/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.419865 4995 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-95tbl_ffe39c73-665e-4de6-afb5-2e9b93419e33/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.430610 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-fk7x2_de6fc9c2-f9a9-41fd-8cfb-b0493d823c20/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.444660 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9_50e51652-8f18-4234-b29b-85e684e63bfd/manager/0.log" Jan 20 17:56:36 crc kubenswrapper[4995]: I0120 17:56:36.545691 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c987874f9-t2thd_6bd0aa66-ff4d-43ff-925d-e3ead5943058/operator/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.770746 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7696897b84-8gt6d_bb15a8a1-9d6b-4032-9ecb-71719f2b3d91/manager/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.779666 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r87vf_3e43abde-a2a7-4334-a3a2-7859aad1a87b/registry-server/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.831504 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-vvwk8_9d2f128c-9463-4735-9bf7-91bff7148887/manager/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.861323 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-6rn8v_37f347f2-1ab4-4e49-9340-57a960ff8eb1/manager/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.885259 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-jwzhh_31bd181f-39ff-4e9f-949c-8a6ed84f3f42/operator/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.910584 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-dxnvv_d0afd012-c6e1-4a66-a8a1-9edccfdff278/manager/0.log" Jan 20 17:56:37 crc kubenswrapper[4995]: I0120 17:56:37.989161 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:56:37 crc kubenswrapper[4995]: E0120 17:56:37.989506 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:56:38 crc kubenswrapper[4995]: I0120 17:56:38.076542 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-fdpgr_c2d307fa-2be9-4f04-8ae4-f3b55e987ceb/manager/0.log" Jan 20 17:56:38 crc kubenswrapper[4995]: I0120 17:56:38.086797 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-csd5m_8a04fc71-9575-4cf5-bdab-2c741002c47f/manager/0.log" Jan 20 17:56:38 crc kubenswrapper[4995]: I0120 17:56:38.140939 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6db9b5db6c-29hz8_17dfb7c9-6832-48d3-ad83-91508cf85de3/manager/0.log" Jan 20 17:56:41 crc kubenswrapper[4995]: E0120 17:56:41.923783 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5d9cfc6_6ade_44d1_b909_8f278727027f.slice/crio-6a1c7d767c782a0f2feaec25f9a7b96c09954baddcb0d1e1f790b0a43078f9f6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5d9cfc6_6ade_44d1_b909_8f278727027f.slice/crio-conmon-6a1c7d767c782a0f2feaec25f9a7b96c09954baddcb0d1e1f790b0a43078f9f6.scope\": RecentStats: unable to find data in memory cache]" Jan 20 17:56:41 crc kubenswrapper[4995]: I0120 17:56:41.990253 4995 generic.go:334] "Generic (PLEG): container finished" podID="f5d9cfc6-6ade-44d1-b909-8f278727027f" containerID="6a1c7d767c782a0f2feaec25f9a7b96c09954baddcb0d1e1f790b0a43078f9f6" exitCode=0 Jan 20 17:56:41 crc kubenswrapper[4995]: I0120 17:56:41.999917 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc" event={"ID":"f5d9cfc6-6ade-44d1-b909-8f278727027f","Type":"ContainerDied","Data":"6a1c7d767c782a0f2feaec25f9a7b96c09954baddcb0d1e1f790b0a43078f9f6"} Jan 20 17:56:42 crc kubenswrapper[4995]: I0120 17:56:42.064340 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-xxvtq_5573e17e-4b7e-4afd-8608-e8afd1c98256/control-plane-machine-set-operator/0.log" Jan 20 17:56:42 crc kubenswrapper[4995]: I0120 17:56:42.078112 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/kube-rbac-proxy/0.log" Jan 20 17:56:42 crc kubenswrapper[4995]: I0120 17:56:42.084522 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/machine-api-operator/0.log" Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.114163 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc" Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.156778 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-4pkqc"] Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.168752 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-4pkqc"] Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.271288 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vql66\" (UniqueName: \"kubernetes.io/projected/f5d9cfc6-6ade-44d1-b909-8f278727027f-kube-api-access-vql66\") pod \"f5d9cfc6-6ade-44d1-b909-8f278727027f\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.271678 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5d9cfc6-6ade-44d1-b909-8f278727027f-host\") pod \"f5d9cfc6-6ade-44d1-b909-8f278727027f\" (UID: \"f5d9cfc6-6ade-44d1-b909-8f278727027f\") " Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.272529 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f5d9cfc6-6ade-44d1-b909-8f278727027f-host" (OuterVolumeSpecName: "host") pod "f5d9cfc6-6ade-44d1-b909-8f278727027f" (UID: "f5d9cfc6-6ade-44d1-b909-8f278727027f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.280106 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5d9cfc6-6ade-44d1-b909-8f278727027f-kube-api-access-vql66" (OuterVolumeSpecName: "kube-api-access-vql66") pod "f5d9cfc6-6ade-44d1-b909-8f278727027f" (UID: "f5d9cfc6-6ade-44d1-b909-8f278727027f"). InnerVolumeSpecName "kube-api-access-vql66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.375349 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vql66\" (UniqueName: \"kubernetes.io/projected/f5d9cfc6-6ade-44d1-b909-8f278727027f-kube-api-access-vql66\") on node \"crc\" DevicePath \"\"" Jan 20 17:56:43 crc kubenswrapper[4995]: I0120 17:56:43.375409 4995 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f5d9cfc6-6ade-44d1-b909-8f278727027f-host\") on node \"crc\" DevicePath \"\"" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.002022 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5d9cfc6-6ade-44d1-b909-8f278727027f" path="/var/lib/kubelet/pods/f5d9cfc6-6ade-44d1-b909-8f278727027f/volumes" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.011860 4995 scope.go:117] "RemoveContainer" containerID="6a1c7d767c782a0f2feaec25f9a7b96c09954baddcb0d1e1f790b0a43078f9f6" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.011902 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-4pkqc" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.387595 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-nfph8"] Jan 20 17:56:44 crc kubenswrapper[4995]: E0120 17:56:44.387984 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d9cfc6-6ade-44d1-b909-8f278727027f" containerName="container-00" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.387996 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d9cfc6-6ade-44d1-b909-8f278727027f" containerName="container-00" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.388226 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d9cfc6-6ade-44d1-b909-8f278727027f" containerName="container-00" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.388835 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.497903 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5c0b545-ce3e-4db6-9808-e263aeb90c02-host\") pod \"crc-debug-nfph8\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.498322 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl672\" (UniqueName: \"kubernetes.io/projected/d5c0b545-ce3e-4db6-9808-e263aeb90c02-kube-api-access-sl672\") pod \"crc-debug-nfph8\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.600137 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5c0b545-ce3e-4db6-9808-e263aeb90c02-host\") pod \"crc-debug-nfph8\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.600283 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl672\" (UniqueName: \"kubernetes.io/projected/d5c0b545-ce3e-4db6-9808-e263aeb90c02-kube-api-access-sl672\") pod \"crc-debug-nfph8\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.600385 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5c0b545-ce3e-4db6-9808-e263aeb90c02-host\") pod \"crc-debug-nfph8\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.623352 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl672\" (UniqueName: \"kubernetes.io/projected/d5c0b545-ce3e-4db6-9808-e263aeb90c02-kube-api-access-sl672\") pod \"crc-debug-nfph8\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:44 crc kubenswrapper[4995]: I0120 17:56:44.710197 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:45 crc kubenswrapper[4995]: I0120 17:56:45.033244 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" event={"ID":"d5c0b545-ce3e-4db6-9808-e263aeb90c02","Type":"ContainerStarted","Data":"dc72a98ac7531a104dd378b0cc5a4f4bc588eafff61c357fbe96ed3e22be54a5"} Jan 20 17:56:45 crc kubenswrapper[4995]: I0120 17:56:45.033515 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" event={"ID":"d5c0b545-ce3e-4db6-9808-e263aeb90c02","Type":"ContainerStarted","Data":"877eb2165cfb32107e801462b2b614d39adfa50b16bf3a1a282d5c5adf341ba9"} Jan 20 17:56:45 crc kubenswrapper[4995]: I0120 17:56:45.056695 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" podStartSLOduration=1.056671991 podStartE2EDuration="1.056671991s" podCreationTimestamp="2026-01-20 17:56:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 17:56:45.046849235 +0000 UTC m=+5123.291454051" watchObservedRunningTime="2026-01-20 17:56:45.056671991 +0000 UTC m=+5123.301276807" Jan 20 17:56:46 crc kubenswrapper[4995]: I0120 17:56:46.041244 4995 generic.go:334] "Generic (PLEG): container finished" podID="d5c0b545-ce3e-4db6-9808-e263aeb90c02" containerID="dc72a98ac7531a104dd378b0cc5a4f4bc588eafff61c357fbe96ed3e22be54a5" exitCode=0 Jan 20 17:56:46 crc kubenswrapper[4995]: I0120 17:56:46.041483 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" event={"ID":"d5c0b545-ce3e-4db6-9808-e263aeb90c02","Type":"ContainerDied","Data":"dc72a98ac7531a104dd378b0cc5a4f4bc588eafff61c357fbe96ed3e22be54a5"} Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.165024 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.335068 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-nfph8"] Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.342136 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-nfph8"] Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.342934 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5c0b545-ce3e-4db6-9808-e263aeb90c02-host\") pod \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.343047 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d5c0b545-ce3e-4db6-9808-e263aeb90c02-host" (OuterVolumeSpecName: "host") pod "d5c0b545-ce3e-4db6-9808-e263aeb90c02" (UID: "d5c0b545-ce3e-4db6-9808-e263aeb90c02"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.343266 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sl672\" (UniqueName: \"kubernetes.io/projected/d5c0b545-ce3e-4db6-9808-e263aeb90c02-kube-api-access-sl672\") pod \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\" (UID: \"d5c0b545-ce3e-4db6-9808-e263aeb90c02\") " Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.343654 4995 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5c0b545-ce3e-4db6-9808-e263aeb90c02-host\") on node \"crc\" DevicePath \"\"" Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.349822 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5c0b545-ce3e-4db6-9808-e263aeb90c02-kube-api-access-sl672" (OuterVolumeSpecName: "kube-api-access-sl672") pod "d5c0b545-ce3e-4db6-9808-e263aeb90c02" (UID: "d5c0b545-ce3e-4db6-9808-e263aeb90c02"). InnerVolumeSpecName "kube-api-access-sl672". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:56:47 crc kubenswrapper[4995]: I0120 17:56:47.445755 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sl672\" (UniqueName: \"kubernetes.io/projected/d5c0b545-ce3e-4db6-9808-e263aeb90c02-kube-api-access-sl672\") on node \"crc\" DevicePath \"\"" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.015244 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5c0b545-ce3e-4db6-9808-e263aeb90c02" path="/var/lib/kubelet/pods/d5c0b545-ce3e-4db6-9808-e263aeb90c02/volumes" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.067269 4995 scope.go:117] "RemoveContainer" containerID="dc72a98ac7531a104dd378b0cc5a4f4bc588eafff61c357fbe96ed3e22be54a5" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.067342 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-nfph8" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.560753 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-kggcf"] Jan 20 17:56:48 crc kubenswrapper[4995]: E0120 17:56:48.561147 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5c0b545-ce3e-4db6-9808-e263aeb90c02" containerName="container-00" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.561162 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5c0b545-ce3e-4db6-9808-e263aeb90c02" containerName="container-00" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.561404 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5c0b545-ce3e-4db6-9808-e263aeb90c02" containerName="container-00" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.562102 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.670327 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbq7z\" (UniqueName: \"kubernetes.io/projected/b47e4d95-5acf-4149-a521-d1f51d368960-kube-api-access-tbq7z\") pod \"crc-debug-kggcf\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.670391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b47e4d95-5acf-4149-a521-d1f51d368960-host\") pod \"crc-debug-kggcf\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.773467 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b47e4d95-5acf-4149-a521-d1f51d368960-host\") pod \"crc-debug-kggcf\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.773687 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b47e4d95-5acf-4149-a521-d1f51d368960-host\") pod \"crc-debug-kggcf\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.773939 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbq7z\" (UniqueName: \"kubernetes.io/projected/b47e4d95-5acf-4149-a521-d1f51d368960-kube-api-access-tbq7z\") pod \"crc-debug-kggcf\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.802233 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbq7z\" (UniqueName: \"kubernetes.io/projected/b47e4d95-5acf-4149-a521-d1f51d368960-kube-api-access-tbq7z\") pod \"crc-debug-kggcf\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: I0120 17:56:48.887606 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:48 crc kubenswrapper[4995]: W0120 17:56:48.924472 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb47e4d95_5acf_4149_a521_d1f51d368960.slice/crio-032114ed94cc5e5c4dc9ec803c752303435d97947d9f356e16f8f5f477751091 WatchSource:0}: Error finding container 032114ed94cc5e5c4dc9ec803c752303435d97947d9f356e16f8f5f477751091: Status 404 returned error can't find the container with id 032114ed94cc5e5c4dc9ec803c752303435d97947d9f356e16f8f5f477751091 Jan 20 17:56:49 crc kubenswrapper[4995]: I0120 17:56:49.085378 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-kggcf" event={"ID":"b47e4d95-5acf-4149-a521-d1f51d368960","Type":"ContainerStarted","Data":"032114ed94cc5e5c4dc9ec803c752303435d97947d9f356e16f8f5f477751091"} Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.103883 4995 generic.go:334] "Generic (PLEG): container finished" podID="b47e4d95-5acf-4149-a521-d1f51d368960" containerID="98252bc424a3968706eb261fd14eec439eaa54970935a661614d5ee41365bd2d" exitCode=0 Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.104044 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-rvzk2/crc-debug-kggcf" event={"ID":"b47e4d95-5acf-4149-a521-d1f51d368960","Type":"ContainerDied","Data":"98252bc424a3968706eb261fd14eec439eaa54970935a661614d5ee41365bd2d"} Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.160628 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-kggcf"] Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.176908 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-rvzk2/crc-debug-kggcf"] Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.846089 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-s4m7v_c4363779-0c13-4195-9d79-aa4271bfc02f/cert-manager-controller/0.log" Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.860395 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-s9d5f_4b2e374d-19bf-42a0-8f00-7dea7ac84bea/cert-manager-cainjector/0.log" Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.868410 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zngmd_ac9c3170-cb6c-4320-ad74-57b76462b730/cert-manager-webhook/0.log" Jan 20 17:56:50 crc kubenswrapper[4995]: I0120 17:56:50.990025 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:56:50 crc kubenswrapper[4995]: E0120 17:56:50.990565 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.222587 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.327854 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b47e4d95-5acf-4149-a521-d1f51d368960-host\") pod \"b47e4d95-5acf-4149-a521-d1f51d368960\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.327971 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbq7z\" (UniqueName: \"kubernetes.io/projected/b47e4d95-5acf-4149-a521-d1f51d368960-kube-api-access-tbq7z\") pod \"b47e4d95-5acf-4149-a521-d1f51d368960\" (UID: \"b47e4d95-5acf-4149-a521-d1f51d368960\") " Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.327983 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b47e4d95-5acf-4149-a521-d1f51d368960-host" (OuterVolumeSpecName: "host") pod "b47e4d95-5acf-4149-a521-d1f51d368960" (UID: "b47e4d95-5acf-4149-a521-d1f51d368960"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.328605 4995 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b47e4d95-5acf-4149-a521-d1f51d368960-host\") on node \"crc\" DevicePath \"\"" Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.332967 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b47e4d95-5acf-4149-a521-d1f51d368960-kube-api-access-tbq7z" (OuterVolumeSpecName: "kube-api-access-tbq7z") pod "b47e4d95-5acf-4149-a521-d1f51d368960" (UID: "b47e4d95-5acf-4149-a521-d1f51d368960"). InnerVolumeSpecName "kube-api-access-tbq7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 17:56:51 crc kubenswrapper[4995]: I0120 17:56:51.430395 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbq7z\" (UniqueName: \"kubernetes.io/projected/b47e4d95-5acf-4149-a521-d1f51d368960-kube-api-access-tbq7z\") on node \"crc\" DevicePath \"\"" Jan 20 17:56:52 crc kubenswrapper[4995]: I0120 17:56:52.004927 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b47e4d95-5acf-4149-a521-d1f51d368960" path="/var/lib/kubelet/pods/b47e4d95-5acf-4149-a521-d1f51d368960/volumes" Jan 20 17:56:52 crc kubenswrapper[4995]: I0120 17:56:52.124940 4995 scope.go:117] "RemoveContainer" containerID="98252bc424a3968706eb261fd14eec439eaa54970935a661614d5ee41365bd2d" Jan 20 17:56:52 crc kubenswrapper[4995]: I0120 17:56:52.125296 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-rvzk2/crc-debug-kggcf" Jan 20 17:56:56 crc kubenswrapper[4995]: I0120 17:56:56.638485 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-wlqrf_e9e16a4a-ae36-4787-936d-78f9f621b82b/nmstate-console-plugin/0.log" Jan 20 17:56:56 crc kubenswrapper[4995]: I0120 17:56:56.661870 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-lzkmg_04b8b048-2dd6-4899-8012-e20e4783fe36/nmstate-handler/0.log" Jan 20 17:56:56 crc kubenswrapper[4995]: I0120 17:56:56.683933 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/nmstate-metrics/0.log" Jan 20 17:56:56 crc kubenswrapper[4995]: I0120 17:56:56.690340 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/kube-rbac-proxy/0.log" Jan 20 17:56:56 crc kubenswrapper[4995]: I0120 17:56:56.711571 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-l5g5b_eb9959cc-1ba1-48c5-9a2b-846fb2ae6590/nmstate-operator/0.log" Jan 20 17:56:56 crc kubenswrapper[4995]: I0120 17:56:56.723787 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-95rj4_5afe74bc-6c6a-4c69-8991-aea61b381a53/nmstate-webhook/0.log" Jan 20 17:57:02 crc kubenswrapper[4995]: I0120 17:57:02.356996 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-q94m9_5def50d1-b2d8-447a-8f22-8632fd26d689/prometheus-operator/0.log" Jan 20 17:57:02 crc kubenswrapper[4995]: I0120 17:57:02.368390 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl_27395fe5-dac8-4556-8446-a478ea8f7928/prometheus-operator-admission-webhook/0.log" Jan 20 17:57:02 crc kubenswrapper[4995]: I0120 17:57:02.381484 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77_15bbd7b9-457e-4456-ba6a-5f664a592bab/prometheus-operator-admission-webhook/0.log" Jan 20 17:57:02 crc kubenswrapper[4995]: I0120 17:57:02.412325 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-h2hm4_decae503-5765-4258-9081-981c2215ebcf/operator/0.log" Jan 20 17:57:02 crc kubenswrapper[4995]: I0120 17:57:02.427244 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-thhmm_502777ef-bdd5-4d42-b695-a7259cd811c9/perses-operator/0.log" Jan 20 17:57:05 crc kubenswrapper[4995]: I0120 17:57:05.990492 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:57:05 crc kubenswrapper[4995]: E0120 17:57:05.991310 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:57:09 crc kubenswrapper[4995]: I0120 17:57:09.715360 4995 
log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/controller/0.log" Jan 20 17:57:09 crc kubenswrapper[4995]: I0120 17:57:09.736160 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/kube-rbac-proxy/0.log" Jan 20 17:57:09 crc kubenswrapper[4995]: I0120 17:57:09.753684 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/controller/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.264347 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.275841 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/reloader/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.281299 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr-metrics/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.289924 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.299712 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy-frr/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.306190 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-frr-files/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.310353 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-reloader/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.317262 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-metrics/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.326934 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-zwjrk_473c4019-d6be-4420-a678-d18999ddbe1c/frr-k8s-webhook-server/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.345011 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6dd7779458-w2rt4_8b17b582-a06b-4ece-b513-7f826c838f6f/manager/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.354908 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5c6d4b5599-f8tsv_7fae7627-5782-4525-ba17-4507d15764cd/webhook-server/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.777012 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/speaker/0.log" Jan 20 17:57:11 crc kubenswrapper[4995]: I0120 17:57:11.784982 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/kube-rbac-proxy/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.630399 4995 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b_d10c311c-330e-4ef3-bfb4-bbb14ca8d42d/extract/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.639286 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b_d10c311c-330e-4ef3-bfb4-bbb14ca8d42d/util/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.647590 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b_d10c311c-330e-4ef3-bfb4-bbb14ca8d42d/pull/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.662104 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp_66d983b8-16a0-44ba-8e76-c1a6645c2001/extract/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.670768 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp_66d983b8-16a0-44ba-8e76-c1a6645c2001/util/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.678008 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp_66d983b8-16a0-44ba-8e76-c1a6645c2001/pull/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.691643 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc_bb24643a-1c98-49d5-a82c-53b3f9fb88f6/extract/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.697197 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc_bb24643a-1c98-49d5-a82c-53b3f9fb88f6/util/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.706682 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc_bb24643a-1c98-49d5-a82c-53b3f9fb88f6/pull/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.918000 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4fcqp_075a85ee-fd7b-44e9-a631-840b4fcc96fb/registry-server/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.922057 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4fcqp_075a85ee-fd7b-44e9-a631-840b4fcc96fb/extract-utilities/0.log" Jan 20 17:57:15 crc kubenswrapper[4995]: I0120 17:57:15.928133 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4fcqp_075a85ee-fd7b-44e9-a631-840b4fcc96fb/extract-content/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.726835 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x4fxr_04b5f989-6750-4e5c-8ded-4af0bf07325b/registry-server/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.747734 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x4fxr_04b5f989-6750-4e5c-8ded-4af0bf07325b/extract-utilities/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.752166 4995 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_community-operators-x4fxr_04b5f989-6750-4e5c-8ded-4af0bf07325b/extract-content/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.766312 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-l2zqv_9c2404e7-457d-4f79-814d-f6a44e88c749/marketplace-operator/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.973326 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-62wzq_9539d617-3abb-4dd5-aa3a-f9f6dd8615bb/registry-server/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.980512 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-62wzq_9539d617-3abb-4dd5-aa3a-f9f6dd8615bb/extract-utilities/0.log" Jan 20 17:57:16 crc kubenswrapper[4995]: I0120 17:57:16.987110 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-62wzq_9539d617-3abb-4dd5-aa3a-f9f6dd8615bb/extract-content/0.log" Jan 20 17:57:17 crc kubenswrapper[4995]: I0120 17:57:17.406833 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nzn96_144eba19-b4f0-48d9-a1f6-fc191b87c617/registry-server/0.log" Jan 20 17:57:17 crc kubenswrapper[4995]: I0120 17:57:17.411433 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nzn96_144eba19-b4f0-48d9-a1f6-fc191b87c617/extract-utilities/0.log" Jan 20 17:57:17 crc kubenswrapper[4995]: I0120 17:57:17.417925 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nzn96_144eba19-b4f0-48d9-a1f6-fc191b87c617/extract-content/0.log" Jan 20 17:57:19 crc kubenswrapper[4995]: I0120 17:57:19.989540 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:57:19 crc kubenswrapper[4995]: E0120 17:57:19.990306 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:57:21 crc kubenswrapper[4995]: I0120 17:57:21.271235 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-q94m9_5def50d1-b2d8-447a-8f22-8632fd26d689/prometheus-operator/0.log" Jan 20 17:57:21 crc kubenswrapper[4995]: I0120 17:57:21.286732 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl_27395fe5-dac8-4556-8446-a478ea8f7928/prometheus-operator-admission-webhook/0.log" Jan 20 17:57:21 crc kubenswrapper[4995]: I0120 17:57:21.299395 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77_15bbd7b9-457e-4456-ba6a-5f664a592bab/prometheus-operator-admission-webhook/0.log" Jan 20 17:57:21 crc kubenswrapper[4995]: I0120 17:57:21.331573 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-h2hm4_decae503-5765-4258-9081-981c2215ebcf/operator/0.log" Jan 20 17:57:21 crc 
kubenswrapper[4995]: I0120 17:57:21.342485 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-thhmm_502777ef-bdd5-4d42-b695-a7259cd811c9/perses-operator/0.log" Jan 20 17:57:34 crc kubenswrapper[4995]: I0120 17:57:34.989265 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:57:34 crc kubenswrapper[4995]: E0120 17:57:34.989936 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:57:46 crc kubenswrapper[4995]: I0120 17:57:46.990337 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:57:46 crc kubenswrapper[4995]: E0120 17:57:46.991242 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:58:01 crc kubenswrapper[4995]: I0120 17:58:01.998315 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:58:02 crc kubenswrapper[4995]: E0120 17:58:02.000353 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:58:14 crc kubenswrapper[4995]: I0120 17:58:14.990276 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:58:14 crc kubenswrapper[4995]: E0120 17:58:14.990995 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:58:25 crc kubenswrapper[4995]: I0120 17:58:25.989320 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:58:25 crc kubenswrapper[4995]: E0120 17:58:25.990097 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:58:36 crc kubenswrapper[4995]: I0120 17:58:36.990269 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:58:36 crc kubenswrapper[4995]: E0120 17:58:36.991041 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.677533 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-q94m9_5def50d1-b2d8-447a-8f22-8632fd26d689/prometheus-operator/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.688584 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl_27395fe5-dac8-4556-8446-a478ea8f7928/prometheus-operator-admission-webhook/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.697638 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77_15bbd7b9-457e-4456-ba6a-5f664a592bab/prometheus-operator-admission-webhook/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.745659 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-h2hm4_decae503-5765-4258-9081-981c2215ebcf/operator/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.766755 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-thhmm_502777ef-bdd5-4d42-b695-a7259cd811c9/perses-operator/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.929499 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-s4m7v_c4363779-0c13-4195-9d79-aa4271bfc02f/cert-manager-controller/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.947342 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-s9d5f_4b2e374d-19bf-42a0-8f00-7dea7ac84bea/cert-manager-cainjector/0.log" Jan 20 17:58:41 crc kubenswrapper[4995]: I0120 17:58:41.957719 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zngmd_ac9c3170-cb6c-4320-ad74-57b76462b730/cert-manager-webhook/0.log" Jan 20 17:58:42 crc kubenswrapper[4995]: I0120 17:58:42.826654 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/controller/0.log" Jan 20 17:58:42 crc kubenswrapper[4995]: I0120 17:58:42.833114 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/kube-rbac-proxy/0.log" Jan 20 17:58:42 crc kubenswrapper[4995]: I0120 17:58:42.861099 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/controller/0.log" Jan 20 17:58:42 crc kubenswrapper[4995]: I0120 17:58:42.960182 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-7zdch_c0a3e997-8709-444b-ae4e-8fc34b04cb6e/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.036932 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-zgvcz_3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.052271 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mj76w_9f302bf3-1501-44cc-924c-2e5c42c0eb58/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.060232 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/extract/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.071975 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/util/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.079922 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/pull/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.234175 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-bxm9j_072647c8-2d0e-4716-bb29-a87e3ff5cd29/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.243425 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-wm2kb_49392c07-237b-447e-a126-f06e1cbf32a2/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.287746 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-zs4nf_c8061771-759d-49d5-b88b-9d66f45277ac/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.676575 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-zd75z_439ab902-28ff-48a4-81e4-93c72937e573/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.692278 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-7p5v4_f4577775-2c19-495a-95e7-1638f359b533/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.806694 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-dwn52_a3c2211e-845d-47cc-b4a5-962340b0d53c/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.827004 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-wjrpf_93ac6eeb-0456-4cfe-8298-b8b97d09716c/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.869960 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-qd2nk_47ec26a3-41ca-482f-b539-c9dc32af0bb0/manager/0.log" Jan 20 17:58:43 crc kubenswrapper[4995]: I0120 17:58:43.934481 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-m7p7b_86d4f806-c5e4-4ce0-a859-5e104b0d5dce/manager/0.log" Jan 20 17:58:44 crc kubenswrapper[4995]: I0120 17:58:44.077498 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-95tbl_ffe39c73-665e-4de6-afb5-2e9b93419e33/manager/0.log" Jan 20 17:58:44 crc kubenswrapper[4995]: I0120 17:58:44.087588 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-fk7x2_de6fc9c2-f9a9-41fd-8cfb-b0493d823c20/manager/0.log" Jan 20 17:58:44 crc kubenswrapper[4995]: I0120 17:58:44.100254 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9_50e51652-8f18-4234-b29b-85e684e63bfd/manager/0.log" Jan 20 17:58:44 crc kubenswrapper[4995]: I0120 17:58:44.307292 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c987874f9-t2thd_6bd0aa66-ff4d-43ff-925d-e3ead5943058/operator/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.551668 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.563264 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/reloader/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.567709 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr-metrics/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.578313 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.585321 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy-frr/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.592303 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-frr-files/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.600277 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-reloader/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.604592 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-metrics/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.614743 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-zwjrk_473c4019-d6be-4420-a678-d18999ddbe1c/frr-k8s-webhook-server/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.640003 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6dd7779458-w2rt4_8b17b582-a06b-4ece-b513-7f826c838f6f/manager/0.log" Jan 20 17:58:45 crc kubenswrapper[4995]: I0120 17:58:45.652045 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5c6d4b5599-f8tsv_7fae7627-5782-4525-ba17-4507d15764cd/webhook-server/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.227913 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/speaker/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.232979 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7696897b84-8gt6d_bb15a8a1-9d6b-4032-9ecb-71719f2b3d91/manager/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.235426 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/kube-rbac-proxy/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.248094 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r87vf_3e43abde-a2a7-4334-a3a2-7859aad1a87b/registry-server/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.300718 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-vvwk8_9d2f128c-9463-4735-9bf7-91bff7148887/manager/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.336967 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-6rn8v_37f347f2-1ab4-4e49-9340-57a960ff8eb1/manager/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.357622 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-jwzhh_31bd181f-39ff-4e9f-949c-8a6ed84f3f42/operator/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.383908 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-dxnvv_d0afd012-c6e1-4a66-a8a1-9edccfdff278/manager/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.559329 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-fdpgr_c2d307fa-2be9-4f04-8ae4-f3b55e987ceb/manager/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.575689 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-csd5m_8a04fc71-9575-4cf5-bdab-2c741002c47f/manager/0.log" Jan 20 17:58:46 crc kubenswrapper[4995]: I0120 17:58:46.645159 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6db9b5db6c-29hz8_17dfb7c9-6832-48d3-ad83-91508cf85de3/manager/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.086742 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-s4m7v_c4363779-0c13-4195-9d79-aa4271bfc02f/cert-manager-controller/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.103197 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-s9d5f_4b2e374d-19bf-42a0-8f00-7dea7ac84bea/cert-manager-cainjector/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.113317 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zngmd_ac9c3170-cb6c-4320-ad74-57b76462b730/cert-manager-webhook/0.log" Jan 20 
17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.729112 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-xxvtq_5573e17e-4b7e-4afd-8608-e8afd1c98256/control-plane-machine-set-operator/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.738895 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/kube-rbac-proxy/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.750092 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/machine-api-operator/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.945154 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-wlqrf_e9e16a4a-ae36-4787-936d-78f9f621b82b/nmstate-console-plugin/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.960899 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-lzkmg_04b8b048-2dd6-4899-8012-e20e4783fe36/nmstate-handler/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.973011 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/nmstate-metrics/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.980767 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/kube-rbac-proxy/0.log" Jan 20 17:58:47 crc kubenswrapper[4995]: I0120 17:58:47.998132 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-l5g5b_eb9959cc-1ba1-48c5-9a2b-846fb2ae6590/nmstate-operator/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.007387 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-95rj4_5afe74bc-6c6a-4c69-8991-aea61b381a53/nmstate-webhook/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.556358 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-7zdch_c0a3e997-8709-444b-ae4e-8fc34b04cb6e/manager/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.596216 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-zgvcz_3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a/manager/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.607651 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mj76w_9f302bf3-1501-44cc-924c-2e5c42c0eb58/manager/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.615266 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/extract/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.622054 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/util/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.630267 4995 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/pull/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.738148 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-bxm9j_072647c8-2d0e-4716-bb29-a87e3ff5cd29/manager/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.748812 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-wm2kb_49392c07-237b-447e-a126-f06e1cbf32a2/manager/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.782035 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-zs4nf_c8061771-759d-49d5-b88b-9d66f45277ac/manager/0.log" Jan 20 17:58:48 crc kubenswrapper[4995]: I0120 17:58:48.989532 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:58:48 crc kubenswrapper[4995]: E0120 17:58:48.989778 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.108838 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-zd75z_439ab902-28ff-48a4-81e4-93c72937e573/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.121280 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-7p5v4_f4577775-2c19-495a-95e7-1638f359b533/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.219298 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-dwn52_a3c2211e-845d-47cc-b4a5-962340b0d53c/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.228884 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-wjrpf_93ac6eeb-0456-4cfe-8298-b8b97d09716c/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.276115 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-qd2nk_47ec26a3-41ca-482f-b539-c9dc32af0bb0/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.338406 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-m7p7b_86d4f806-c5e4-4ce0-a859-5e104b0d5dce/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.440818 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-95tbl_ffe39c73-665e-4de6-afb5-2e9b93419e33/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.452173 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-fk7x2_de6fc9c2-f9a9-41fd-8cfb-b0493d823c20/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.466401 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9_50e51652-8f18-4234-b29b-85e684e63bfd/manager/0.log" Jan 20 17:58:49 crc kubenswrapper[4995]: I0120 17:58:49.569480 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c987874f9-t2thd_6bd0aa66-ff4d-43ff-925d-e3ead5943058/operator/0.log" Jan 20 17:58:50 crc kubenswrapper[4995]: I0120 17:58:50.992967 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7696897b84-8gt6d_bb15a8a1-9d6b-4032-9ecb-71719f2b3d91/manager/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.008706 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r87vf_3e43abde-a2a7-4334-a3a2-7859aad1a87b/registry-server/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.062052 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-vvwk8_9d2f128c-9463-4735-9bf7-91bff7148887/manager/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.127907 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-6rn8v_37f347f2-1ab4-4e49-9340-57a960ff8eb1/manager/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.150001 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-jwzhh_31bd181f-39ff-4e9f-949c-8a6ed84f3f42/operator/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.177243 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-dxnvv_d0afd012-c6e1-4a66-a8a1-9edccfdff278/manager/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.408569 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-fdpgr_c2d307fa-2be9-4f04-8ae4-f3b55e987ceb/manager/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.422778 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-csd5m_8a04fc71-9575-4cf5-bdab-2c741002c47f/manager/0.log" Jan 20 17:58:51 crc kubenswrapper[4995]: I0120 17:58:51.486363 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6db9b5db6c-29hz8_17dfb7c9-6832-48d3-ad83-91508cf85de3/manager/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.130114 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/kube-multus-additional-cni-plugins/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.139348 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/egress-router-binary-copy/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.147288 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/cni-plugins/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.154966 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/bond-cni-plugin/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.162017 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/routeoverride-cni/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.167171 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/whereabouts-cni-bincopy/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.174123 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/whereabouts-cni/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.205550 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-vw77m_7b1451e1-776c-411e-9790-8091d11c01fd/multus-admission-controller/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.210448 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-vw77m_7b1451e1-776c-411e-9790-8091d11c01fd/kube-rbac-proxy/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.252528 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/2.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.338825 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/3.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.369267 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-kbdtf_9dfc8bb5-28e8-4ba3-8009-09d5585a1a12/network-metrics-daemon/0.log" Jan 20 17:58:53 crc kubenswrapper[4995]: I0120 17:58:53.375843 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-kbdtf_9dfc8bb5-28e8-4ba3-8009-09d5585a1a12/kube-rbac-proxy/0.log" Jan 20 17:59:02 crc kubenswrapper[4995]: I0120 17:59:02.989745 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:59:02 crc kubenswrapper[4995]: E0120 17:59:02.990571 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:59:14 crc kubenswrapper[4995]: I0120 17:59:14.989635 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:59:14 crc kubenswrapper[4995]: E0120 17:59:14.990373 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:59:29 crc kubenswrapper[4995]: I0120 17:59:29.989917 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:59:29 crc kubenswrapper[4995]: E0120 17:59:29.991045 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 17:59:41 crc kubenswrapper[4995]: I0120 17:59:41.998368 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 17:59:42 crc kubenswrapper[4995]: I0120 17:59:42.780791 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"c108ef8e65c0aab2d079e6f81528b8426ba20adcd99ad2e4a31d4644ad933328"} Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.161011 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g"] Jan 20 18:00:00 crc kubenswrapper[4995]: E0120 18:00:00.162495 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b47e4d95-5acf-4149-a521-d1f51d368960" containerName="container-00" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.162524 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="b47e4d95-5acf-4149-a521-d1f51d368960" containerName="container-00" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.162938 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="b47e4d95-5acf-4149-a521-d1f51d368960" containerName="container-00" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.164015 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.166220 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.167990 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.188348 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g"] Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.201885 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rtm8\" (UniqueName: \"kubernetes.io/projected/ca1d336d-024b-413d-b07d-f04552656570-kube-api-access-2rtm8\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.202198 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca1d336d-024b-413d-b07d-f04552656570-config-volume\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.202347 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca1d336d-024b-413d-b07d-f04552656570-secret-volume\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.303461 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca1d336d-024b-413d-b07d-f04552656570-secret-volume\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.303614 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rtm8\" (UniqueName: \"kubernetes.io/projected/ca1d336d-024b-413d-b07d-f04552656570-kube-api-access-2rtm8\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.303665 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca1d336d-024b-413d-b07d-f04552656570-config-volume\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.304539 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca1d336d-024b-413d-b07d-f04552656570-config-volume\") pod 
\"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.309318 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca1d336d-024b-413d-b07d-f04552656570-secret-volume\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.320719 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rtm8\" (UniqueName: \"kubernetes.io/projected/ca1d336d-024b-413d-b07d-f04552656570-kube-api-access-2rtm8\") pod \"collect-profiles-29482200-rww5g\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.486308 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.915442 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g"] Jan 20 18:00:00 crc kubenswrapper[4995]: I0120 18:00:00.970802 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" event={"ID":"ca1d336d-024b-413d-b07d-f04552656570","Type":"ContainerStarted","Data":"fd3df0089d7f2a85eedefe44dc97ab77131fde764822f0280b83f31b9ee2b67e"} Jan 20 18:00:02 crc kubenswrapper[4995]: I0120 18:00:02.009885 4995 generic.go:334] "Generic (PLEG): container finished" podID="ca1d336d-024b-413d-b07d-f04552656570" containerID="b14a87a125e7e41247218d2a5a2f6d0821d94f21b585fed1eb9d1020cdee3b04" exitCode=0 Jan 20 18:00:02 crc kubenswrapper[4995]: I0120 18:00:02.012097 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" event={"ID":"ca1d336d-024b-413d-b07d-f04552656570","Type":"ContainerDied","Data":"b14a87a125e7e41247218d2a5a2f6d0821d94f21b585fed1eb9d1020cdee3b04"} Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.759531 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.872337 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca1d336d-024b-413d-b07d-f04552656570-config-volume\") pod \"ca1d336d-024b-413d-b07d-f04552656570\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.872428 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca1d336d-024b-413d-b07d-f04552656570-secret-volume\") pod \"ca1d336d-024b-413d-b07d-f04552656570\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.872452 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rtm8\" (UniqueName: \"kubernetes.io/projected/ca1d336d-024b-413d-b07d-f04552656570-kube-api-access-2rtm8\") pod \"ca1d336d-024b-413d-b07d-f04552656570\" (UID: \"ca1d336d-024b-413d-b07d-f04552656570\") " Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.872965 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca1d336d-024b-413d-b07d-f04552656570-config-volume" (OuterVolumeSpecName: "config-volume") pod "ca1d336d-024b-413d-b07d-f04552656570" (UID: "ca1d336d-024b-413d-b07d-f04552656570"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.880451 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca1d336d-024b-413d-b07d-f04552656570-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ca1d336d-024b-413d-b07d-f04552656570" (UID: "ca1d336d-024b-413d-b07d-f04552656570"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.880644 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca1d336d-024b-413d-b07d-f04552656570-kube-api-access-2rtm8" (OuterVolumeSpecName: "kube-api-access-2rtm8") pod "ca1d336d-024b-413d-b07d-f04552656570" (UID: "ca1d336d-024b-413d-b07d-f04552656570"). InnerVolumeSpecName "kube-api-access-2rtm8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.974661 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca1d336d-024b-413d-b07d-f04552656570-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.974983 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca1d336d-024b-413d-b07d-f04552656570-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:00:03 crc kubenswrapper[4995]: I0120 18:00:03.974993 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rtm8\" (UniqueName: \"kubernetes.io/projected/ca1d336d-024b-413d-b07d-f04552656570-kube-api-access-2rtm8\") on node \"crc\" DevicePath \"\"" Jan 20 18:00:04 crc kubenswrapper[4995]: I0120 18:00:04.036924 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" event={"ID":"ca1d336d-024b-413d-b07d-f04552656570","Type":"ContainerDied","Data":"fd3df0089d7f2a85eedefe44dc97ab77131fde764822f0280b83f31b9ee2b67e"} Jan 20 18:00:04 crc kubenswrapper[4995]: I0120 18:00:04.036976 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd3df0089d7f2a85eedefe44dc97ab77131fde764822f0280b83f31b9ee2b67e" Jan 20 18:00:04 crc kubenswrapper[4995]: I0120 18:00:04.037008 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g" Jan 20 18:00:04 crc kubenswrapper[4995]: I0120 18:00:04.860318 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv"] Jan 20 18:00:04 crc kubenswrapper[4995]: I0120 18:00:04.874642 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482155-hq2zv"] Jan 20 18:00:06 crc kubenswrapper[4995]: I0120 18:00:05.999754 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5115dbbb-9f37-4041-a81c-e575552102ba" path="/var/lib/kubelet/pods/5115dbbb-9f37-4041-a81c-e575552102ba/volumes" Jan 20 18:00:35 crc kubenswrapper[4995]: I0120 18:00:35.550808 4995 scope.go:117] "RemoveContainer" containerID="ec1c4b19d6fb0f822af3bc31fa4ccc39b8335a27accf81a1037d41596b77b6d1" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.145002 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29482201-zttwb"] Jan 20 18:01:00 crc kubenswrapper[4995]: E0120 18:01:00.146065 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca1d336d-024b-413d-b07d-f04552656570" containerName="collect-profiles" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.148629 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca1d336d-024b-413d-b07d-f04552656570" containerName="collect-profiles" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.149009 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca1d336d-024b-413d-b07d-f04552656570" containerName="collect-profiles" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.149902 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.155893 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482201-zttwb"] Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.226649 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-config-data\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.226705 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-fernet-keys\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.226898 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mckhz\" (UniqueName: \"kubernetes.io/projected/a190e372-ffc1-43d0-bb58-b44814c479ed-kube-api-access-mckhz\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.226942 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-combined-ca-bundle\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.328632 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mckhz\" (UniqueName: \"kubernetes.io/projected/a190e372-ffc1-43d0-bb58-b44814c479ed-kube-api-access-mckhz\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.329010 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-combined-ca-bundle\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.329099 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-config-data\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.329122 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-fernet-keys\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.337031 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-fernet-keys\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.337974 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-config-data\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.339098 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-combined-ca-bundle\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.345787 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mckhz\" (UniqueName: \"kubernetes.io/projected/a190e372-ffc1-43d0-bb58-b44814c479ed-kube-api-access-mckhz\") pod \"keystone-cron-29482201-zttwb\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:00 crc kubenswrapper[4995]: I0120 18:01:00.545230 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:01 crc kubenswrapper[4995]: W0120 18:01:01.049883 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda190e372_ffc1_43d0_bb58_b44814c479ed.slice/crio-9596bf5d77d07cab3ce64e43d74d871a55a16f20e414d7a40514d9b3975d2fdd WatchSource:0}: Error finding container 9596bf5d77d07cab3ce64e43d74d871a55a16f20e414d7a40514d9b3975d2fdd: Status 404 returned error can't find the container with id 9596bf5d77d07cab3ce64e43d74d871a55a16f20e414d7a40514d9b3975d2fdd Jan 20 18:01:01 crc kubenswrapper[4995]: I0120 18:01:01.052622 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482201-zttwb"] Jan 20 18:01:01 crc kubenswrapper[4995]: I0120 18:01:01.679271 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482201-zttwb" event={"ID":"a190e372-ffc1-43d0-bb58-b44814c479ed","Type":"ContainerStarted","Data":"db78bfc1d9a9f4d7e7a4afe97121a5c0cce2e8c95b5036e358ded805a74b7903"} Jan 20 18:01:01 crc kubenswrapper[4995]: I0120 18:01:01.679616 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482201-zttwb" event={"ID":"a190e372-ffc1-43d0-bb58-b44814c479ed","Type":"ContainerStarted","Data":"9596bf5d77d07cab3ce64e43d74d871a55a16f20e414d7a40514d9b3975d2fdd"} Jan 20 18:01:01 crc kubenswrapper[4995]: I0120 18:01:01.697407 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29482201-zttwb" podStartSLOduration=1.69737232 podStartE2EDuration="1.69737232s" podCreationTimestamp="2026-01-20 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 18:01:01.691653315 +0000 UTC m=+5379.936258121" watchObservedRunningTime="2026-01-20 18:01:01.69737232 +0000 UTC m=+5379.941977126" Jan 20 18:01:04 crc kubenswrapper[4995]: I0120 18:01:04.707527 4995 
generic.go:334] "Generic (PLEG): container finished" podID="a190e372-ffc1-43d0-bb58-b44814c479ed" containerID="db78bfc1d9a9f4d7e7a4afe97121a5c0cce2e8c95b5036e358ded805a74b7903" exitCode=0 Jan 20 18:01:04 crc kubenswrapper[4995]: I0120 18:01:04.707624 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482201-zttwb" event={"ID":"a190e372-ffc1-43d0-bb58-b44814c479ed","Type":"ContainerDied","Data":"db78bfc1d9a9f4d7e7a4afe97121a5c0cce2e8c95b5036e358ded805a74b7903"} Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.086175 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.169962 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-fernet-keys\") pod \"a190e372-ffc1-43d0-bb58-b44814c479ed\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.170219 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mckhz\" (UniqueName: \"kubernetes.io/projected/a190e372-ffc1-43d0-bb58-b44814c479ed-kube-api-access-mckhz\") pod \"a190e372-ffc1-43d0-bb58-b44814c479ed\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.171351 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-config-data\") pod \"a190e372-ffc1-43d0-bb58-b44814c479ed\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.171456 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-combined-ca-bundle\") pod \"a190e372-ffc1-43d0-bb58-b44814c479ed\" (UID: \"a190e372-ffc1-43d0-bb58-b44814c479ed\") " Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.176252 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a190e372-ffc1-43d0-bb58-b44814c479ed-kube-api-access-mckhz" (OuterVolumeSpecName: "kube-api-access-mckhz") pod "a190e372-ffc1-43d0-bb58-b44814c479ed" (UID: "a190e372-ffc1-43d0-bb58-b44814c479ed"). InnerVolumeSpecName "kube-api-access-mckhz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.176577 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a190e372-ffc1-43d0-bb58-b44814c479ed" (UID: "a190e372-ffc1-43d0-bb58-b44814c479ed"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.208006 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a190e372-ffc1-43d0-bb58-b44814c479ed" (UID: "a190e372-ffc1-43d0-bb58-b44814c479ed"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.229744 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-config-data" (OuterVolumeSpecName: "config-data") pod "a190e372-ffc1-43d0-bb58-b44814c479ed" (UID: "a190e372-ffc1-43d0-bb58-b44814c479ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.275206 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.275337 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.275415 4995 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a190e372-ffc1-43d0-bb58-b44814c479ed-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.275538 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mckhz\" (UniqueName: \"kubernetes.io/projected/a190e372-ffc1-43d0-bb58-b44814c479ed-kube-api-access-mckhz\") on node \"crc\" DevicePath \"\"" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.729035 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482201-zttwb" event={"ID":"a190e372-ffc1-43d0-bb58-b44814c479ed","Type":"ContainerDied","Data":"9596bf5d77d07cab3ce64e43d74d871a55a16f20e414d7a40514d9b3975d2fdd"} Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.729347 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9596bf5d77d07cab3ce64e43d74d871a55a16f20e414d7a40514d9b3975d2fdd" Jan 20 18:01:06 crc kubenswrapper[4995]: I0120 18:01:06.729111 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29482201-zttwb" Jan 20 18:02:00 crc kubenswrapper[4995]: I0120 18:02:00.572305 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:02:00 crc kubenswrapper[4995]: I0120 18:02:00.575064 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.138272 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v2wr5"] Jan 20 18:02:23 crc kubenswrapper[4995]: E0120 18:02:23.140317 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a190e372-ffc1-43d0-bb58-b44814c479ed" containerName="keystone-cron" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.140344 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a190e372-ffc1-43d0-bb58-b44814c479ed" containerName="keystone-cron" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.140696 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a190e372-ffc1-43d0-bb58-b44814c479ed" containerName="keystone-cron" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.142937 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.153247 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2wr5"] Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.191869 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-utilities\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.194329 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrkfp\" (UniqueName: \"kubernetes.io/projected/208fd9fa-68d7-48b0-97a9-aecab8ee2785-kube-api-access-qrkfp\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.195150 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-catalog-content\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.298164 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-utilities\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " 
pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.298236 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrkfp\" (UniqueName: \"kubernetes.io/projected/208fd9fa-68d7-48b0-97a9-aecab8ee2785-kube-api-access-qrkfp\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.298254 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-catalog-content\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.298938 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-catalog-content\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.299216 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-utilities\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.321795 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrkfp\" (UniqueName: \"kubernetes.io/projected/208fd9fa-68d7-48b0-97a9-aecab8ee2785-kube-api-access-qrkfp\") pod \"redhat-marketplace-v2wr5\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") " pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:23 crc kubenswrapper[4995]: I0120 18:02:23.495116 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2wr5" Jan 20 18:02:24 crc kubenswrapper[4995]: I0120 18:02:24.014327 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2wr5"] Jan 20 18:02:24 crc kubenswrapper[4995]: W0120 18:02:24.015306 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod208fd9fa_68d7_48b0_97a9_aecab8ee2785.slice/crio-5e9f6961dd031c0c12db5578c166abff7240615fad1b10a3f97319e5287b4d08 WatchSource:0}: Error finding container 5e9f6961dd031c0c12db5578c166abff7240615fad1b10a3f97319e5287b4d08: Status 404 returned error can't find the container with id 5e9f6961dd031c0c12db5578c166abff7240615fad1b10a3f97319e5287b4d08 Jan 20 18:02:24 crc kubenswrapper[4995]: I0120 18:02:24.634824 4995 generic.go:334] "Generic (PLEG): container finished" podID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerID="910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be" exitCode=0 Jan 20 18:02:24 crc kubenswrapper[4995]: I0120 18:02:24.634899 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2wr5" event={"ID":"208fd9fa-68d7-48b0-97a9-aecab8ee2785","Type":"ContainerDied","Data":"910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be"} Jan 20 18:02:24 crc kubenswrapper[4995]: I0120 18:02:24.634939 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2wr5" event={"ID":"208fd9fa-68d7-48b0-97a9-aecab8ee2785","Type":"ContainerStarted","Data":"5e9f6961dd031c0c12db5578c166abff7240615fad1b10a3f97319e5287b4d08"} Jan 20 18:02:24 crc kubenswrapper[4995]: I0120 18:02:24.639243 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 18:02:26 crc kubenswrapper[4995]: I0120 18:02:26.655746 4995 generic.go:334] "Generic (PLEG): container finished" podID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerID="3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e" exitCode=0 Jan 20 18:02:26 crc kubenswrapper[4995]: I0120 18:02:26.655822 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2wr5" event={"ID":"208fd9fa-68d7-48b0-97a9-aecab8ee2785","Type":"ContainerDied","Data":"3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e"} Jan 20 18:02:27 crc kubenswrapper[4995]: I0120 18:02:27.669977 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2wr5" event={"ID":"208fd9fa-68d7-48b0-97a9-aecab8ee2785","Type":"ContainerStarted","Data":"f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2"} Jan 20 18:02:27 crc kubenswrapper[4995]: I0120 18:02:27.705797 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v2wr5" podStartSLOduration=2.242348845 podStartE2EDuration="4.705748111s" podCreationTimestamp="2026-01-20 18:02:23 +0000 UTC" firstStartedPulling="2026-01-20 18:02:24.638824533 +0000 UTC m=+5462.883429379" lastFinishedPulling="2026-01-20 18:02:27.102223829 +0000 UTC m=+5465.346828645" observedRunningTime="2026-01-20 18:02:27.692252404 +0000 UTC m=+5465.936857240" watchObservedRunningTime="2026-01-20 18:02:27.705748111 +0000 UTC m=+5465.950352937" Jan 20 18:02:30 crc kubenswrapper[4995]: I0120 18:02:30.571803 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon 
Jan 20 18:02:30 crc kubenswrapper[4995]: I0120 18:02:30.572336 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 18:02:33 crc kubenswrapper[4995]: I0120 18:02:33.496327 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v2wr5"
Jan 20 18:02:33 crc kubenswrapper[4995]: I0120 18:02:33.497886 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v2wr5"
Jan 20 18:02:33 crc kubenswrapper[4995]: I0120 18:02:33.575677 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v2wr5"
Jan 20 18:02:33 crc kubenswrapper[4995]: I0120 18:02:33.837290 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v2wr5"
Jan 20 18:02:33 crc kubenswrapper[4995]: I0120 18:02:33.908405 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2wr5"]
Jan 20 18:02:35 crc kubenswrapper[4995]: I0120 18:02:35.786835 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-v2wr5" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="registry-server" containerID="cri-o://f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2" gracePeriod=2
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.290976 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2wr5"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.399242 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-catalog-content\") pod \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") "
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.399557 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-utilities\") pod \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") "
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.399767 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrkfp\" (UniqueName: \"kubernetes.io/projected/208fd9fa-68d7-48b0-97a9-aecab8ee2785-kube-api-access-qrkfp\") pod \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\" (UID: \"208fd9fa-68d7-48b0-97a9-aecab8ee2785\") "
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.400561 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-utilities" (OuterVolumeSpecName: "utilities") pod "208fd9fa-68d7-48b0-97a9-aecab8ee2785" (UID: "208fd9fa-68d7-48b0-97a9-aecab8ee2785"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.415006 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/208fd9fa-68d7-48b0-97a9-aecab8ee2785-kube-api-access-qrkfp" (OuterVolumeSpecName: "kube-api-access-qrkfp") pod "208fd9fa-68d7-48b0-97a9-aecab8ee2785" (UID: "208fd9fa-68d7-48b0-97a9-aecab8ee2785"). InnerVolumeSpecName "kube-api-access-qrkfp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.431400 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "208fd9fa-68d7-48b0-97a9-aecab8ee2785" (UID: "208fd9fa-68d7-48b0-97a9-aecab8ee2785"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.502381 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.502409 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/208fd9fa-68d7-48b0-97a9-aecab8ee2785-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.502419 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrkfp\" (UniqueName: \"kubernetes.io/projected/208fd9fa-68d7-48b0-97a9-aecab8ee2785-kube-api-access-qrkfp\") on node \"crc\" DevicePath \"\""
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.796028 4995 generic.go:334] "Generic (PLEG): container finished" podID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerID="f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2" exitCode=0
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.796066 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2wr5" event={"ID":"208fd9fa-68d7-48b0-97a9-aecab8ee2785","Type":"ContainerDied","Data":"f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2"}
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.796108 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v2wr5" event={"ID":"208fd9fa-68d7-48b0-97a9-aecab8ee2785","Type":"ContainerDied","Data":"5e9f6961dd031c0c12db5578c166abff7240615fad1b10a3f97319e5287b4d08"}
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.796127 4995 scope.go:117] "RemoveContainer" containerID="f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.796269 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v2wr5"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.827812 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2wr5"]
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.829222 4995 scope.go:117] "RemoveContainer" containerID="3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.836402 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-v2wr5"]
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.850541 4995 scope.go:117] "RemoveContainer" containerID="910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.897261 4995 scope.go:117] "RemoveContainer" containerID="f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2"
Jan 20 18:02:36 crc kubenswrapper[4995]: E0120 18:02:36.897770 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2\": container with ID starting with f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2 not found: ID does not exist" containerID="f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.897803 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2"} err="failed to get container status \"f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2\": rpc error: code = NotFound desc = could not find container \"f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2\": container with ID starting with f313f4bdc31e59d96a517f9a7ed84579242da2ba1ef1a8fb972919924ebeb1a2 not found: ID does not exist"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.897823 4995 scope.go:117] "RemoveContainer" containerID="3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e"
Jan 20 18:02:36 crc kubenswrapper[4995]: E0120 18:02:36.898292 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e\": container with ID starting with 3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e not found: ID does not exist" containerID="3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.898338 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e"} err="failed to get container status \"3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e\": rpc error: code = NotFound desc = could not find container \"3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e\": container with ID starting with 3cee3a57c644a1473fa12b9ddf881ab4ae29ad693fb9708214a9fc5b04a55f1e not found: ID does not exist"
Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.898368 4995 scope.go:117] "RemoveContainer" containerID="910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be"
Jan 20 18:02:36 crc kubenswrapper[4995]: E0120 18:02:36.898730 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be\": container with ID starting with 910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be not found: ID does not exist" containerID="910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be"
failed" err="rpc error: code = NotFound desc = could not find container \"910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be\": container with ID starting with 910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be not found: ID does not exist" containerID="910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be" Jan 20 18:02:36 crc kubenswrapper[4995]: I0120 18:02:36.898755 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be"} err="failed to get container status \"910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be\": rpc error: code = NotFound desc = could not find container \"910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be\": container with ID starting with 910be3b38fca6a78f49b1b3ebe705c85e19e7a218720dc33af38eab1d57869be not found: ID does not exist" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.439208 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vrqjg"] Jan 20 18:02:37 crc kubenswrapper[4995]: E0120 18:02:37.439925 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="registry-server" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.439937 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="registry-server" Jan 20 18:02:37 crc kubenswrapper[4995]: E0120 18:02:37.439958 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="extract-content" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.439964 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="extract-content" Jan 20 18:02:37 crc kubenswrapper[4995]: E0120 18:02:37.440013 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="extract-utilities" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.440020 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="extract-utilities" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.440221 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" containerName="registry-server" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.441565 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.457875 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjg"] Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.521228 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-utilities\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.521331 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw8c5\" (UniqueName: \"kubernetes.io/projected/ea29ff27-eca7-480f-bc99-905d14fc6a11-kube-api-access-mw8c5\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.521365 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-catalog-content\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.623872 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-utilities\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.623976 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw8c5\" (UniqueName: \"kubernetes.io/projected/ea29ff27-eca7-480f-bc99-905d14fc6a11-kube-api-access-mw8c5\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.624014 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-catalog-content\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.624482 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-utilities\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.624517 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-catalog-content\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.644225 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mw8c5\" (UniqueName: \"kubernetes.io/projected/ea29ff27-eca7-480f-bc99-905d14fc6a11-kube-api-access-mw8c5\") pod \"redhat-operators-vrqjg\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:37 crc kubenswrapper[4995]: I0120 18:02:37.769560 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:38 crc kubenswrapper[4995]: I0120 18:02:38.004414 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="208fd9fa-68d7-48b0-97a9-aecab8ee2785" path="/var/lib/kubelet/pods/208fd9fa-68d7-48b0-97a9-aecab8ee2785/volumes" Jan 20 18:02:38 crc kubenswrapper[4995]: I0120 18:02:38.239234 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjg"] Jan 20 18:02:38 crc kubenswrapper[4995]: I0120 18:02:38.820041 4995 generic.go:334] "Generic (PLEG): container finished" podID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerID="89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c" exitCode=0 Jan 20 18:02:38 crc kubenswrapper[4995]: I0120 18:02:38.820116 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerDied","Data":"89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c"} Jan 20 18:02:38 crc kubenswrapper[4995]: I0120 18:02:38.820480 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerStarted","Data":"f62670fe5bca92671dd94b0826e04f904d8afbb91720a5765d605941dd72bfc8"} Jan 20 18:02:40 crc kubenswrapper[4995]: I0120 18:02:40.860333 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerStarted","Data":"d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad"} Jan 20 18:02:41 crc kubenswrapper[4995]: I0120 18:02:41.871173 4995 generic.go:334] "Generic (PLEG): container finished" podID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerID="d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad" exitCode=0 Jan 20 18:02:41 crc kubenswrapper[4995]: I0120 18:02:41.871255 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerDied","Data":"d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad"} Jan 20 18:02:43 crc kubenswrapper[4995]: I0120 18:02:43.892267 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerStarted","Data":"235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e"} Jan 20 18:02:43 crc kubenswrapper[4995]: I0120 18:02:43.916548 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vrqjg" podStartSLOduration=3.347764053 podStartE2EDuration="6.916528739s" podCreationTimestamp="2026-01-20 18:02:37 +0000 UTC" firstStartedPulling="2026-01-20 18:02:38.821777093 +0000 UTC m=+5477.066381899" lastFinishedPulling="2026-01-20 18:02:42.390541779 +0000 UTC m=+5480.635146585" observedRunningTime="2026-01-20 18:02:43.907489074 +0000 UTC m=+5482.152093880" 
watchObservedRunningTime="2026-01-20 18:02:43.916528739 +0000 UTC m=+5482.161133545" Jan 20 18:02:47 crc kubenswrapper[4995]: I0120 18:02:47.771182 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:47 crc kubenswrapper[4995]: I0120 18:02:47.771708 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:48 crc kubenswrapper[4995]: I0120 18:02:48.829125 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vrqjg" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="registry-server" probeResult="failure" output=< Jan 20 18:02:48 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 18:02:48 crc kubenswrapper[4995]: > Jan 20 18:02:57 crc kubenswrapper[4995]: I0120 18:02:57.833249 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:57 crc kubenswrapper[4995]: I0120 18:02:57.910402 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:58 crc kubenswrapper[4995]: I0120 18:02:58.078394 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjg"] Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.076368 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vrqjg" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="registry-server" containerID="cri-o://235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e" gracePeriod=2 Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.564412 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.664778 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw8c5\" (UniqueName: \"kubernetes.io/projected/ea29ff27-eca7-480f-bc99-905d14fc6a11-kube-api-access-mw8c5\") pod \"ea29ff27-eca7-480f-bc99-905d14fc6a11\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.664853 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-utilities\") pod \"ea29ff27-eca7-480f-bc99-905d14fc6a11\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.664883 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-catalog-content\") pod \"ea29ff27-eca7-480f-bc99-905d14fc6a11\" (UID: \"ea29ff27-eca7-480f-bc99-905d14fc6a11\") " Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.666396 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-utilities" (OuterVolumeSpecName: "utilities") pod "ea29ff27-eca7-480f-bc99-905d14fc6a11" (UID: "ea29ff27-eca7-480f-bc99-905d14fc6a11"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.673199 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea29ff27-eca7-480f-bc99-905d14fc6a11-kube-api-access-mw8c5" (OuterVolumeSpecName: "kube-api-access-mw8c5") pod "ea29ff27-eca7-480f-bc99-905d14fc6a11" (UID: "ea29ff27-eca7-480f-bc99-905d14fc6a11"). InnerVolumeSpecName "kube-api-access-mw8c5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.767677 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw8c5\" (UniqueName: \"kubernetes.io/projected/ea29ff27-eca7-480f-bc99-905d14fc6a11-kube-api-access-mw8c5\") on node \"crc\" DevicePath \"\"" Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.767725 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.810095 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ea29ff27-eca7-480f-bc99-905d14fc6a11" (UID: "ea29ff27-eca7-480f-bc99-905d14fc6a11"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:02:59 crc kubenswrapper[4995]: I0120 18:02:59.869610 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ea29ff27-eca7-480f-bc99-905d14fc6a11-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.087956 4995 generic.go:334] "Generic (PLEG): container finished" podID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerID="235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e" exitCode=0 Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.088004 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerDied","Data":"235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e"} Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.088036 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrqjg" event={"ID":"ea29ff27-eca7-480f-bc99-905d14fc6a11","Type":"ContainerDied","Data":"f62670fe5bca92671dd94b0826e04f904d8afbb91720a5765d605941dd72bfc8"} Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.088064 4995 scope.go:117] "RemoveContainer" containerID="235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.088274 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vrqjg" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.126361 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjg"] Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.131546 4995 scope.go:117] "RemoveContainer" containerID="d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.150701 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vrqjg"] Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.169128 4995 scope.go:117] "RemoveContainer" containerID="89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.199508 4995 scope.go:117] "RemoveContainer" containerID="235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e" Jan 20 18:03:00 crc kubenswrapper[4995]: E0120 18:03:00.199908 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e\": container with ID starting with 235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e not found: ID does not exist" containerID="235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.199951 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e"} err="failed to get container status \"235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e\": rpc error: code = NotFound desc = could not find container \"235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e\": container with ID starting with 235b47973e1cb83067100603b5a53a3ef9cad3e5ee6fd519d2f93d74500e115e not found: ID does not exist" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.199977 4995 scope.go:117] "RemoveContainer" containerID="d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad" Jan 20 18:03:00 crc kubenswrapper[4995]: E0120 18:03:00.200512 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad\": container with ID starting with d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad not found: ID does not exist" containerID="d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.200546 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad"} err="failed to get container status \"d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad\": rpc error: code = NotFound desc = could not find container \"d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad\": container with ID starting with d3923e2cda0afc79d74ae41995046d41371b76b26ca0723b2fda6a57cb8602ad not found: ID does not exist" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.200562 4995 scope.go:117] "RemoveContainer" containerID="89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c" Jan 20 18:03:00 crc kubenswrapper[4995]: E0120 18:03:00.201164 4995 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c\": container with ID starting with 89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c not found: ID does not exist" containerID="89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.201190 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c"} err="failed to get container status \"89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c\": rpc error: code = NotFound desc = could not find container \"89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c\": container with ID starting with 89eca82ab0f1d640dfb597684dbb7bfd4d06b0acb02e226b61147a1e77a7910c not found: ID does not exist" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.571960 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.572157 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.572251 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.573726 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c108ef8e65c0aab2d079e6f81528b8426ba20adcd99ad2e4a31d4644ad933328"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:03:00 crc kubenswrapper[4995]: I0120 18:03:00.573882 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://c108ef8e65c0aab2d079e6f81528b8426ba20adcd99ad2e4a31d4644ad933328" gracePeriod=600 Jan 20 18:03:01 crc kubenswrapper[4995]: I0120 18:03:01.125504 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="c108ef8e65c0aab2d079e6f81528b8426ba20adcd99ad2e4a31d4644ad933328" exitCode=0 Jan 20 18:03:01 crc kubenswrapper[4995]: I0120 18:03:01.125900 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"c108ef8e65c0aab2d079e6f81528b8426ba20adcd99ad2e4a31d4644ad933328"} Jan 20 18:03:01 crc kubenswrapper[4995]: I0120 18:03:01.126104 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17"} Jan 20 18:03:01 crc kubenswrapper[4995]: I0120 18:03:01.126132 4995 scope.go:117] "RemoveContainer" containerID="e9aebd238688ee8ec6953e47aed6ac0eb67d1c604cbd98c78fc3cc350843048a" Jan 20 18:03:02 crc kubenswrapper[4995]: I0120 18:03:02.000095 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" path="/var/lib/kubelet/pods/ea29ff27-eca7-480f-bc99-905d14fc6a11/volumes" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.169995 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kn9kz"] Jan 20 18:03:34 crc kubenswrapper[4995]: E0120 18:03:34.171101 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="extract-utilities" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.171118 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="extract-utilities" Jan 20 18:03:34 crc kubenswrapper[4995]: E0120 18:03:34.171136 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="extract-content" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.171143 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="extract-content" Jan 20 18:03:34 crc kubenswrapper[4995]: E0120 18:03:34.171156 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="registry-server" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.171164 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="registry-server" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.171434 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea29ff27-eca7-480f-bc99-905d14fc6a11" containerName="registry-server" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.173366 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.198231 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kn9kz"] Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.310364 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-catalog-content\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.310459 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bdwt\" (UniqueName: \"kubernetes.io/projected/64566ed3-42ec-4671-a462-287a8b003999-kube-api-access-5bdwt\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.310526 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-utilities\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.412471 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-utilities\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.412567 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-catalog-content\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.412631 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bdwt\" (UniqueName: \"kubernetes.io/projected/64566ed3-42ec-4671-a462-287a8b003999-kube-api-access-5bdwt\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.413265 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-catalog-content\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.413631 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-utilities\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.449929 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5bdwt\" (UniqueName: \"kubernetes.io/projected/64566ed3-42ec-4671-a462-287a8b003999-kube-api-access-5bdwt\") pod \"community-operators-kn9kz\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:34 crc kubenswrapper[4995]: I0120 18:03:34.496359 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:35 crc kubenswrapper[4995]: I0120 18:03:35.074610 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kn9kz"] Jan 20 18:03:35 crc kubenswrapper[4995]: I0120 18:03:35.512247 4995 generic.go:334] "Generic (PLEG): container finished" podID="64566ed3-42ec-4671-a462-287a8b003999" containerID="90d53cbc966af756d894ba223fa1eace1f08a258443bad85a80622bc405ffe96" exitCode=0 Jan 20 18:03:35 crc kubenswrapper[4995]: I0120 18:03:35.512308 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerDied","Data":"90d53cbc966af756d894ba223fa1eace1f08a258443bad85a80622bc405ffe96"} Jan 20 18:03:35 crc kubenswrapper[4995]: I0120 18:03:35.512539 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerStarted","Data":"16b2cf4dc2aa3aab7e83c82f6951bbfe7af30b52606186ec4428b503d745aa0d"} Jan 20 18:03:36 crc kubenswrapper[4995]: I0120 18:03:36.524562 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerStarted","Data":"f4da340d1f6588b426deba4ada9a99c9ebc86e93359f009c3d29820fe2cc8676"} Jan 20 18:03:37 crc kubenswrapper[4995]: I0120 18:03:37.537824 4995 generic.go:334] "Generic (PLEG): container finished" podID="64566ed3-42ec-4671-a462-287a8b003999" containerID="f4da340d1f6588b426deba4ada9a99c9ebc86e93359f009c3d29820fe2cc8676" exitCode=0 Jan 20 18:03:37 crc kubenswrapper[4995]: I0120 18:03:37.537893 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerDied","Data":"f4da340d1f6588b426deba4ada9a99c9ebc86e93359f009c3d29820fe2cc8676"} Jan 20 18:03:38 crc kubenswrapper[4995]: I0120 18:03:38.552287 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerStarted","Data":"1a02dd85b949f25dcb4e71060e2105a117be6806ce4112a6511e2a4d9d1d3d7d"} Jan 20 18:03:38 crc kubenswrapper[4995]: I0120 18:03:38.587211 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kn9kz" podStartSLOduration=2.1285399 podStartE2EDuration="4.587188306s" podCreationTimestamp="2026-01-20 18:03:34 +0000 UTC" firstStartedPulling="2026-01-20 18:03:35.51405633 +0000 UTC m=+5533.758661136" lastFinishedPulling="2026-01-20 18:03:37.972704716 +0000 UTC m=+5536.217309542" observedRunningTime="2026-01-20 18:03:38.575256632 +0000 UTC m=+5536.819861498" watchObservedRunningTime="2026-01-20 18:03:38.587188306 +0000 UTC m=+5536.831793132" Jan 20 18:03:44 crc kubenswrapper[4995]: I0120 18:03:44.497046 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:44 crc kubenswrapper[4995]: I0120 18:03:44.498053 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:44 crc kubenswrapper[4995]: I0120 18:03:44.556515 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:44 crc kubenswrapper[4995]: I0120 18:03:44.660959 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:44 crc kubenswrapper[4995]: I0120 18:03:44.794774 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kn9kz"] Jan 20 18:03:46 crc kubenswrapper[4995]: I0120 18:03:46.620390 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kn9kz" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="registry-server" containerID="cri-o://1a02dd85b949f25dcb4e71060e2105a117be6806ce4112a6511e2a4d9d1d3d7d" gracePeriod=2 Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.631113 4995 generic.go:334] "Generic (PLEG): container finished" podID="64566ed3-42ec-4671-a462-287a8b003999" containerID="1a02dd85b949f25dcb4e71060e2105a117be6806ce4112a6511e2a4d9d1d3d7d" exitCode=0 Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.631189 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerDied","Data":"1a02dd85b949f25dcb4e71060e2105a117be6806ce4112a6511e2a4d9d1d3d7d"} Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.770525 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.899473 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-utilities\") pod \"64566ed3-42ec-4671-a462-287a8b003999\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.899530 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-catalog-content\") pod \"64566ed3-42ec-4671-a462-287a8b003999\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.900753 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-utilities" (OuterVolumeSpecName: "utilities") pod "64566ed3-42ec-4671-a462-287a8b003999" (UID: "64566ed3-42ec-4671-a462-287a8b003999"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.918352 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bdwt\" (UniqueName: \"kubernetes.io/projected/64566ed3-42ec-4671-a462-287a8b003999-kube-api-access-5bdwt\") pod \"64566ed3-42ec-4671-a462-287a8b003999\" (UID: \"64566ed3-42ec-4671-a462-287a8b003999\") " Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.919148 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.925156 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64566ed3-42ec-4671-a462-287a8b003999-kube-api-access-5bdwt" (OuterVolumeSpecName: "kube-api-access-5bdwt") pod "64566ed3-42ec-4671-a462-287a8b003999" (UID: "64566ed3-42ec-4671-a462-287a8b003999"). InnerVolumeSpecName "kube-api-access-5bdwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:03:47 crc kubenswrapper[4995]: I0120 18:03:47.959182 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64566ed3-42ec-4671-a462-287a8b003999" (UID: "64566ed3-42ec-4671-a462-287a8b003999"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.020644 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64566ed3-42ec-4671-a462-287a8b003999-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.020673 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bdwt\" (UniqueName: \"kubernetes.io/projected/64566ed3-42ec-4671-a462-287a8b003999-kube-api-access-5bdwt\") on node \"crc\" DevicePath \"\"" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.648219 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kn9kz" event={"ID":"64566ed3-42ec-4671-a462-287a8b003999","Type":"ContainerDied","Data":"16b2cf4dc2aa3aab7e83c82f6951bbfe7af30b52606186ec4428b503d745aa0d"} Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.648549 4995 scope.go:117] "RemoveContainer" containerID="1a02dd85b949f25dcb4e71060e2105a117be6806ce4112a6511e2a4d9d1d3d7d" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.648755 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kn9kz" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.681464 4995 scope.go:117] "RemoveContainer" containerID="f4da340d1f6588b426deba4ada9a99c9ebc86e93359f009c3d29820fe2cc8676" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.696984 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kn9kz"] Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.708434 4995 scope.go:117] "RemoveContainer" containerID="90d53cbc966af756d894ba223fa1eace1f08a258443bad85a80622bc405ffe96" Jan 20 18:03:48 crc kubenswrapper[4995]: I0120 18:03:48.711370 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kn9kz"] Jan 20 18:03:49 crc kubenswrapper[4995]: I0120 18:03:49.999326 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64566ed3-42ec-4671-a462-287a8b003999" path="/var/lib/kubelet/pods/64566ed3-42ec-4671-a462-287a8b003999/volumes" Jan 20 18:05:00 crc kubenswrapper[4995]: I0120 18:05:00.571454 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:05:00 crc kubenswrapper[4995]: I0120 18:05:00.571917 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:05:30 crc kubenswrapper[4995]: I0120 18:05:30.572196 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:05:30 crc kubenswrapper[4995]: I0120 18:05:30.573038 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:06:00 crc kubenswrapper[4995]: I0120 18:06:00.571912 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:06:00 crc kubenswrapper[4995]: I0120 18:06:00.572482 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:06:00 crc kubenswrapper[4995]: I0120 18:06:00.572525 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:06:00 crc kubenswrapper[4995]: I0120 18:06:00.573233 4995 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:06:00 crc kubenswrapper[4995]: I0120 18:06:00.573286 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" gracePeriod=600 Jan 20 18:06:00 crc kubenswrapper[4995]: E0120 18:06:00.692257 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:06:01 crc kubenswrapper[4995]: I0120 18:06:01.135436 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" exitCode=0 Jan 20 18:06:01 crc kubenswrapper[4995]: I0120 18:06:01.135622 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17"} Jan 20 18:06:01 crc kubenswrapper[4995]: I0120 18:06:01.135852 4995 scope.go:117] "RemoveContainer" containerID="c108ef8e65c0aab2d079e6f81528b8426ba20adcd99ad2e4a31d4644ad933328" Jan 20 18:06:01 crc kubenswrapper[4995]: I0120 18:06:01.136606 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:06:01 crc kubenswrapper[4995]: E0120 18:06:01.137017 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:06:15 crc kubenswrapper[4995]: I0120 18:06:15.994764 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:06:15 crc kubenswrapper[4995]: E0120 18:06:15.995507 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:06:30 crc kubenswrapper[4995]: I0120 18:06:30.990290 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 
18:06:30 crc kubenswrapper[4995]: E0120 18:06:30.991104 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:06:41 crc kubenswrapper[4995]: I0120 18:06:41.997444 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:06:41 crc kubenswrapper[4995]: E0120 18:06:41.998443 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:06:53 crc kubenswrapper[4995]: I0120 18:06:53.990037 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:06:53 crc kubenswrapper[4995]: E0120 18:06:53.991312 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:07:06 crc kubenswrapper[4995]: I0120 18:07:06.990270 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:07:06 crc kubenswrapper[4995]: E0120 18:07:06.991178 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:07:19 crc kubenswrapper[4995]: I0120 18:07:19.990263 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:07:19 crc kubenswrapper[4995]: E0120 18:07:19.990975 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:07:34 crc kubenswrapper[4995]: I0120 18:07:34.989310 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:07:34 crc kubenswrapper[4995]: E0120 18:07:34.990323 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:07:48 crc kubenswrapper[4995]: I0120 18:07:48.990459 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:07:48 crc kubenswrapper[4995]: E0120 18:07:48.991831 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:07:59 crc kubenswrapper[4995]: I0120 18:07:59.989966 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:07:59 crc kubenswrapper[4995]: E0120 18:07:59.991786 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:08:14 crc kubenswrapper[4995]: I0120 18:08:14.990046 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:08:14 crc kubenswrapper[4995]: E0120 18:08:14.992722 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:08:29 crc kubenswrapper[4995]: I0120 18:08:29.990006 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:08:29 crc kubenswrapper[4995]: E0120 18:08:29.990792 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:08:43 crc kubenswrapper[4995]: I0120 18:08:43.989768 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:08:43 crc kubenswrapper[4995]: E0120 18:08:43.990570 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:08:56 crc kubenswrapper[4995]: I0120 18:08:56.989733 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:08:56 crc kubenswrapper[4995]: E0120 18:08:56.990483 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:09:10 crc kubenswrapper[4995]: I0120 18:09:10.990336 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:09:10 crc kubenswrapper[4995]: E0120 18:09:10.993162 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:09:22 crc kubenswrapper[4995]: I0120 18:09:22.989703 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:09:22 crc kubenswrapper[4995]: E0120 18:09:22.990731 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:09:36 crc kubenswrapper[4995]: I0120 18:09:36.992252 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:09:36 crc kubenswrapper[4995]: E0120 18:09:36.993671 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:09:52 crc kubenswrapper[4995]: I0120 18:09:52.004562 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:09:52 crc kubenswrapper[4995]: E0120 18:09:52.005457 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:10:04 crc kubenswrapper[4995]: I0120 18:10:04.990013 4995 
scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:10:04 crc kubenswrapper[4995]: E0120 18:10:04.990977 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:10:16 crc kubenswrapper[4995]: I0120 18:10:16.989791 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:10:16 crc kubenswrapper[4995]: E0120 18:10:16.992067 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:10:31 crc kubenswrapper[4995]: I0120 18:10:31.996428 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:10:31 crc kubenswrapper[4995]: E0120 18:10:31.996975 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.001885 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dqn2f"] Jan 20 18:10:36 crc kubenswrapper[4995]: E0120 18:10:36.002885 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="extract-content" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.002900 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="extract-content" Jan 20 18:10:36 crc kubenswrapper[4995]: E0120 18:10:36.002943 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="extract-utilities" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.002951 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="extract-utilities" Jan 20 18:10:36 crc kubenswrapper[4995]: E0120 18:10:36.002975 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="registry-server" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.002983 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="registry-server" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.003447 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="64566ed3-42ec-4671-a462-287a8b003999" containerName="registry-server" Jan 20 18:10:36 crc 
kubenswrapper[4995]: I0120 18:10:36.005892 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.010578 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dqn2f"] Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.108334 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jlkh\" (UniqueName: \"kubernetes.io/projected/68c9c585-20d1-4cd2-a05c-de47214e2c6d-kube-api-access-6jlkh\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.108445 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-utilities\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.108465 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-catalog-content\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.209855 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jlkh\" (UniqueName: \"kubernetes.io/projected/68c9c585-20d1-4cd2-a05c-de47214e2c6d-kube-api-access-6jlkh\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.209930 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-utilities\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.209947 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-catalog-content\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.210489 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-catalog-content\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.210586 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-utilities\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 
20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.232758 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jlkh\" (UniqueName: \"kubernetes.io/projected/68c9c585-20d1-4cd2-a05c-de47214e2c6d-kube-api-access-6jlkh\") pod \"certified-operators-dqn2f\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.328223 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:36 crc kubenswrapper[4995]: I0120 18:10:36.926403 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dqn2f"] Jan 20 18:10:37 crc kubenswrapper[4995]: I0120 18:10:37.192281 4995 generic.go:334] "Generic (PLEG): container finished" podID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerID="f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44" exitCode=0 Jan 20 18:10:37 crc kubenswrapper[4995]: I0120 18:10:37.192317 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dqn2f" event={"ID":"68c9c585-20d1-4cd2-a05c-de47214e2c6d","Type":"ContainerDied","Data":"f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44"} Jan 20 18:10:37 crc kubenswrapper[4995]: I0120 18:10:37.192369 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dqn2f" event={"ID":"68c9c585-20d1-4cd2-a05c-de47214e2c6d","Type":"ContainerStarted","Data":"57342674f33f7e1d294be16fa1eeb16af42c29b31567c427c364db65e09d5845"} Jan 20 18:10:37 crc kubenswrapper[4995]: I0120 18:10:37.194353 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 18:10:40 crc kubenswrapper[4995]: I0120 18:10:40.240322 4995 generic.go:334] "Generic (PLEG): container finished" podID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerID="85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720" exitCode=0 Jan 20 18:10:40 crc kubenswrapper[4995]: I0120 18:10:40.240465 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dqn2f" event={"ID":"68c9c585-20d1-4cd2-a05c-de47214e2c6d","Type":"ContainerDied","Data":"85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720"} Jan 20 18:10:41 crc kubenswrapper[4995]: I0120 18:10:41.265157 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dqn2f" event={"ID":"68c9c585-20d1-4cd2-a05c-de47214e2c6d","Type":"ContainerStarted","Data":"de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c"} Jan 20 18:10:43 crc kubenswrapper[4995]: I0120 18:10:43.990031 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:10:43 crc kubenswrapper[4995]: E0120 18:10:43.991485 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:10:46 crc kubenswrapper[4995]: I0120 18:10:46.329018 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
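
The reconciler lines above walk the standard volume lifecycle for a catalog pod: VerifyControllerAttachedVolume, then MountVolume.SetUp for the two emptyDir volumes (utilities, catalog-content) and the projected service-account token (kube-api-access-6jlkh). A hedged sketch of the pod shape that yields exactly those three volumes; the names come from the log, the image is a placeholder, and the kube-api-access volume is injected by the control plane rather than declared in the spec:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// catalogPod sketches the volume layout being mounted above: two emptyDirs
// declared in the spec, plus the kube-api-access-* projected token, which
// the API server adds automatically and therefore does not appear here.
func catalogPod() *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "certified-operators-dqn2f", // name taken from the log
			Namespace: "openshift-marketplace",
		},
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
				{Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
			},
			Containers: []corev1.Container{{
				Name:  "registry-server",
				Image: "registry.example/catalog:latest", // placeholder image
			}},
		},
	}
}

func main() {
	fmt.Println("pod:", catalogPod().Name)
}
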
pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:46 crc kubenswrapper[4995]: I0120 18:10:46.329517 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:46 crc kubenswrapper[4995]: I0120 18:10:46.392985 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:46 crc kubenswrapper[4995]: I0120 18:10:46.416788 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dqn2f" podStartSLOduration=7.921092282 podStartE2EDuration="11.416769411s" podCreationTimestamp="2026-01-20 18:10:35 +0000 UTC" firstStartedPulling="2026-01-20 18:10:37.193964259 +0000 UTC m=+5955.438569065" lastFinishedPulling="2026-01-20 18:10:40.689641388 +0000 UTC m=+5958.934246194" observedRunningTime="2026-01-20 18:10:41.293226342 +0000 UTC m=+5959.537831168" watchObservedRunningTime="2026-01-20 18:10:46.416769411 +0000 UTC m=+5964.661374207" Jan 20 18:10:47 crc kubenswrapper[4995]: I0120 18:10:47.392873 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:47 crc kubenswrapper[4995]: I0120 18:10:47.467467 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dqn2f"] Jan 20 18:10:49 crc kubenswrapper[4995]: I0120 18:10:49.359329 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dqn2f" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="registry-server" containerID="cri-o://de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c" gracePeriod=2 Jan 20 18:10:49 crc kubenswrapper[4995]: I0120 18:10:49.887498 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.022914 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-utilities\") pod \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.022997 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jlkh\" (UniqueName: \"kubernetes.io/projected/68c9c585-20d1-4cd2-a05c-de47214e2c6d-kube-api-access-6jlkh\") pod \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.023048 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-catalog-content\") pod \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\" (UID: \"68c9c585-20d1-4cd2-a05c-de47214e2c6d\") " Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.043469 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-utilities" (OuterVolumeSpecName: "utilities") pod "68c9c585-20d1-4cd2-a05c-de47214e2c6d" (UID: "68c9c585-20d1-4cd2-a05c-de47214e2c6d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.058212 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68c9c585-20d1-4cd2-a05c-de47214e2c6d-kube-api-access-6jlkh" (OuterVolumeSpecName: "kube-api-access-6jlkh") pod "68c9c585-20d1-4cd2-a05c-de47214e2c6d" (UID: "68c9c585-20d1-4cd2-a05c-de47214e2c6d"). InnerVolumeSpecName "kube-api-access-6jlkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.100431 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68c9c585-20d1-4cd2-a05c-de47214e2c6d" (UID: "68c9c585-20d1-4cd2-a05c-de47214e2c6d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.125878 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.125923 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jlkh\" (UniqueName: \"kubernetes.io/projected/68c9c585-20d1-4cd2-a05c-de47214e2c6d-kube-api-access-6jlkh\") on node \"crc\" DevicePath \"\"" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.125933 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68c9c585-20d1-4cd2-a05c-de47214e2c6d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.370584 4995 generic.go:334] "Generic (PLEG): container finished" podID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerID="de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c" exitCode=0 Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.370631 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dqn2f" event={"ID":"68c9c585-20d1-4cd2-a05c-de47214e2c6d","Type":"ContainerDied","Data":"de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c"} Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.370660 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dqn2f" event={"ID":"68c9c585-20d1-4cd2-a05c-de47214e2c6d","Type":"ContainerDied","Data":"57342674f33f7e1d294be16fa1eeb16af42c29b31567c427c364db65e09d5845"} Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.370681 4995 scope.go:117] "RemoveContainer" containerID="de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.370811 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dqn2f" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.395894 4995 scope.go:117] "RemoveContainer" containerID="85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.420361 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dqn2f"] Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.434026 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dqn2f"] Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.446427 4995 scope.go:117] "RemoveContainer" containerID="f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.477414 4995 scope.go:117] "RemoveContainer" containerID="de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c" Jan 20 18:10:50 crc kubenswrapper[4995]: E0120 18:10:50.478064 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c\": container with ID starting with de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c not found: ID does not exist" containerID="de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.478149 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c"} err="failed to get container status \"de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c\": rpc error: code = NotFound desc = could not find container \"de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c\": container with ID starting with de1a3affb7fa7cf7b74ed664bc2684e6b3bd842c39163ec862499a85a6a0369c not found: ID does not exist" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.478185 4995 scope.go:117] "RemoveContainer" containerID="85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720" Jan 20 18:10:50 crc kubenswrapper[4995]: E0120 18:10:50.478667 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720\": container with ID starting with 85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720 not found: ID does not exist" containerID="85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.478714 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720"} err="failed to get container status \"85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720\": rpc error: code = NotFound desc = could not find container \"85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720\": container with ID starting with 85a274bc267c293b9e5ce6d3cbcb262a6e8cde3f1c219a4fff3593bc03e3f720 not found: ID does not exist" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.478741 4995 scope.go:117] "RemoveContainer" containerID="f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44" Jan 20 18:10:50 crc kubenswrapper[4995]: E0120 18:10:50.479125 4995 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44\": container with ID starting with f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44 not found: ID does not exist" containerID="f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44" Jan 20 18:10:50 crc kubenswrapper[4995]: I0120 18:10:50.479168 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44"} err="failed to get container status \"f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44\": rpc error: code = NotFound desc = could not find container \"f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44\": container with ID starting with f0b7af305870177fe8c0ca237971e3959f0f5848e6eef3262eeada850b76bb44 not found: ID does not exist" Jan 20 18:10:52 crc kubenswrapper[4995]: I0120 18:10:52.010600 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" path="/var/lib/kubelet/pods/68c9c585-20d1-4cd2-a05c-de47214e2c6d/volumes" Jan 20 18:10:54 crc kubenswrapper[4995]: I0120 18:10:54.990891 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:10:54 crc kubenswrapper[4995]: E0120 18:10:54.991826 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:11:09 crc kubenswrapper[4995]: I0120 18:11:09.989845 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:11:10 crc kubenswrapper[4995]: I0120 18:11:10.832822 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"3c2f41106cf47e0bd532ff85b56553c1306f7fe3267fd9b4798de41d2bd73ad3"} Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.540920 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bpwxd"] Jan 20 18:12:40 crc kubenswrapper[4995]: E0120 18:12:40.545208 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="extract-content" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.545396 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="extract-content" Jan 20 18:12:40 crc kubenswrapper[4995]: E0120 18:12:40.545543 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="extract-utilities" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.545633 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="extract-utilities" Jan 20 18:12:40 crc kubenswrapper[4995]: E0120 18:12:40.545714 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="registry-server" Jan 
20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.545816 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="registry-server" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.546674 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c9c585-20d1-4cd2-a05c-de47214e2c6d" containerName="registry-server" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.550040 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.562600 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bpwxd"] Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.710209 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-utilities\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.710550 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzh4q\" (UniqueName: \"kubernetes.io/projected/bc91524b-c181-4954-a940-42089bc9900a-kube-api-access-jzh4q\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.710764 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-catalog-content\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.812321 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-catalog-content\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.812428 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-utilities\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.812506 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzh4q\" (UniqueName: \"kubernetes.io/projected/bc91524b-c181-4954-a940-42089bc9900a-kube-api-access-jzh4q\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.812845 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-catalog-content\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 
crc kubenswrapper[4995]: I0120 18:12:40.812910 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-utilities\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.838801 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzh4q\" (UniqueName: \"kubernetes.io/projected/bc91524b-c181-4954-a940-42089bc9900a-kube-api-access-jzh4q\") pod \"redhat-operators-bpwxd\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") " pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:40 crc kubenswrapper[4995]: I0120 18:12:40.891321 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:41 crc kubenswrapper[4995]: I0120 18:12:41.408623 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bpwxd"] Jan 20 18:12:41 crc kubenswrapper[4995]: I0120 18:12:41.837854 4995 generic.go:334] "Generic (PLEG): container finished" podID="bc91524b-c181-4954-a940-42089bc9900a" containerID="dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647" exitCode=0 Jan 20 18:12:41 crc kubenswrapper[4995]: I0120 18:12:41.837901 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpwxd" event={"ID":"bc91524b-c181-4954-a940-42089bc9900a","Type":"ContainerDied","Data":"dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647"} Jan 20 18:12:41 crc kubenswrapper[4995]: I0120 18:12:41.837929 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpwxd" event={"ID":"bc91524b-c181-4954-a940-42089bc9900a","Type":"ContainerStarted","Data":"90918fb4a962bd14cee4c4c066cffea5f43f0b46f64a0d65c72e19f7ba002f5f"} Jan 20 18:12:43 crc kubenswrapper[4995]: I0120 18:12:43.871304 4995 generic.go:334] "Generic (PLEG): container finished" podID="bc91524b-c181-4954-a940-42089bc9900a" containerID="ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151" exitCode=0 Jan 20 18:12:43 crc kubenswrapper[4995]: I0120 18:12:43.871380 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpwxd" event={"ID":"bc91524b-c181-4954-a940-42089bc9900a","Type":"ContainerDied","Data":"ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151"} Jan 20 18:12:45 crc kubenswrapper[4995]: I0120 18:12:45.909489 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpwxd" event={"ID":"bc91524b-c181-4954-a940-42089bc9900a","Type":"ContainerStarted","Data":"29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1"} Jan 20 18:12:45 crc kubenswrapper[4995]: I0120 18:12:45.952900 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bpwxd" podStartSLOduration=3.123685333 podStartE2EDuration="5.952879s" podCreationTimestamp="2026-01-20 18:12:40 +0000 UTC" firstStartedPulling="2026-01-20 18:12:41.839948996 +0000 UTC m=+6080.084553802" lastFinishedPulling="2026-01-20 18:12:44.669142663 +0000 UTC m=+6082.913747469" observedRunningTime="2026-01-20 18:12:45.942815286 +0000 UTC m=+6084.187420102" watchObservedRunningTime="2026-01-20 18:12:45.952879 +0000 UTC m=+6084.197483806" Jan 20 18:12:46 crc 
kubenswrapper[4995]: I0120 18:12:46.309136 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-24vcr"] Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.313880 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.321746 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24vcr"] Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.447374 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vxfn\" (UniqueName: \"kubernetes.io/projected/f0f42b9c-c367-42c0-8b22-61a6c07d594d-kube-api-access-9vxfn\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.447691 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-utilities\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.447726 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-catalog-content\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.549591 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-utilities\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.549639 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-catalog-content\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.549778 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vxfn\" (UniqueName: \"kubernetes.io/projected/f0f42b9c-c367-42c0-8b22-61a6c07d594d-kube-api-access-9vxfn\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.550485 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-utilities\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.550503 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-catalog-content\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.577612 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vxfn\" (UniqueName: \"kubernetes.io/projected/f0f42b9c-c367-42c0-8b22-61a6c07d594d-kube-api-access-9vxfn\") pod \"redhat-marketplace-24vcr\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") " pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:46 crc kubenswrapper[4995]: I0120 18:12:46.646831 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:47 crc kubenswrapper[4995]: I0120 18:12:47.143914 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24vcr"] Jan 20 18:12:47 crc kubenswrapper[4995]: I0120 18:12:47.939615 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerStarted","Data":"b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84"} Jan 20 18:12:47 crc kubenswrapper[4995]: I0120 18:12:47.939876 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerStarted","Data":"1bb630c66fa9d588e4a48ef7f3975f4343d7379c1b4120105f6b3615b72fc000"} Jan 20 18:12:49 crc kubenswrapper[4995]: I0120 18:12:49.959145 4995 generic.go:334] "Generic (PLEG): container finished" podID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerID="b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84" exitCode=0 Jan 20 18:12:49 crc kubenswrapper[4995]: I0120 18:12:49.959222 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerDied","Data":"b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84"} Jan 20 18:12:50 crc kubenswrapper[4995]: I0120 18:12:50.891961 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:50 crc kubenswrapper[4995]: I0120 18:12:50.892349 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:12:50 crc kubenswrapper[4995]: I0120 18:12:50.969427 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerStarted","Data":"250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e"} Jan 20 18:12:51 crc kubenswrapper[4995]: I0120 18:12:51.957347 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bpwxd" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="registry-server" probeResult="failure" output=< Jan 20 18:12:51 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 18:12:51 crc kubenswrapper[4995]: > Jan 20 18:12:52 crc kubenswrapper[4995]: I0120 18:12:52.047175 4995 generic.go:334] "Generic (PLEG): container finished" podID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" 
containerID="250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e" exitCode=0 Jan 20 18:12:52 crc kubenswrapper[4995]: I0120 18:12:52.047239 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerDied","Data":"250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e"} Jan 20 18:12:54 crc kubenswrapper[4995]: I0120 18:12:54.067951 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerStarted","Data":"1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82"} Jan 20 18:12:54 crc kubenswrapper[4995]: I0120 18:12:54.094850 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-24vcr" podStartSLOduration=5.136984933 podStartE2EDuration="8.094829067s" podCreationTimestamp="2026-01-20 18:12:46 +0000 UTC" firstStartedPulling="2026-01-20 18:12:49.961723315 +0000 UTC m=+6088.206328121" lastFinishedPulling="2026-01-20 18:12:52.919567439 +0000 UTC m=+6091.164172255" observedRunningTime="2026-01-20 18:12:54.091476667 +0000 UTC m=+6092.336081483" watchObservedRunningTime="2026-01-20 18:12:54.094829067 +0000 UTC m=+6092.339433883" Jan 20 18:12:56 crc kubenswrapper[4995]: I0120 18:12:56.647030 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:56 crc kubenswrapper[4995]: I0120 18:12:56.648272 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:56 crc kubenswrapper[4995]: I0120 18:12:56.717395 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:12:58 crc kubenswrapper[4995]: I0120 18:12:58.171302 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-24vcr" Jan 20 18:13:00 crc kubenswrapper[4995]: I0120 18:13:00.980590 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.070763 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bpwxd" Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.163098 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-24vcr"] Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.163400 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-24vcr" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="registry-server" containerID="cri-o://1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82" gracePeriod=2 Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.713290 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24vcr"
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.787066 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-utilities\") pod \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") "
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.787376 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vxfn\" (UniqueName: \"kubernetes.io/projected/f0f42b9c-c367-42c0-8b22-61a6c07d594d-kube-api-access-9vxfn\") pod \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") "
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.787420 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-catalog-content\") pod \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\" (UID: \"f0f42b9c-c367-42c0-8b22-61a6c07d594d\") "
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.788291 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-utilities" (OuterVolumeSpecName: "utilities") pod "f0f42b9c-c367-42c0-8b22-61a6c07d594d" (UID: "f0f42b9c-c367-42c0-8b22-61a6c07d594d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.794271 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0f42b9c-c367-42c0-8b22-61a6c07d594d-kube-api-access-9vxfn" (OuterVolumeSpecName: "kube-api-access-9vxfn") pod "f0f42b9c-c367-42c0-8b22-61a6c07d594d" (UID: "f0f42b9c-c367-42c0-8b22-61a6c07d594d"). InnerVolumeSpecName "kube-api-access-9vxfn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.816406 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f0f42b9c-c367-42c0-8b22-61a6c07d594d" (UID: "f0f42b9c-c367-42c0-8b22-61a6c07d594d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.890352 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.890406 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vxfn\" (UniqueName: \"kubernetes.io/projected/f0f42b9c-c367-42c0-8b22-61a6c07d594d-kube-api-access-9vxfn\") on node \"crc\" DevicePath \"\""
Jan 20 18:13:01 crc kubenswrapper[4995]: I0120 18:13:01.890431 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0f42b9c-c367-42c0-8b22-61a6c07d594d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.168873 4995 generic.go:334] "Generic (PLEG): container finished" podID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerID="1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82" exitCode=0
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.168939 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerDied","Data":"1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82"}
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.168956 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24vcr"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.168993 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24vcr" event={"ID":"f0f42b9c-c367-42c0-8b22-61a6c07d594d","Type":"ContainerDied","Data":"1bb630c66fa9d588e4a48ef7f3975f4343d7379c1b4120105f6b3615b72fc000"}
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.169012 4995 scope.go:117] "RemoveContainer" containerID="1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.200309 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-24vcr"]
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.210002 4995 scope.go:117] "RemoveContainer" containerID="250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.210410 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-24vcr"]
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.243063 4995 scope.go:117] "RemoveContainer" containerID="b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.300703 4995 scope.go:117] "RemoveContainer" containerID="1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82"
Jan 20 18:13:02 crc kubenswrapper[4995]: E0120 18:13:02.301192 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82\": container with ID starting with 1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82 not found: ID does not exist" containerID="1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.301252 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82"} err="failed to get container status \"1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82\": rpc error: code = NotFound desc = could not find container \"1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82\": container with ID starting with 1b7d6f31167e642ff40c7382068ba61fe36e6da89c9c48e39f21bf70b5395a82 not found: ID does not exist"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.301280 4995 scope.go:117] "RemoveContainer" containerID="250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e"
Jan 20 18:13:02 crc kubenswrapper[4995]: E0120 18:13:02.301759 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e\": container with ID starting with 250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e not found: ID does not exist" containerID="250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.301809 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e"} err="failed to get container status \"250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e\": rpc error: code = NotFound desc = could not find container \"250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e\": container with ID starting with 250f1cf5087f49019473847618d7893e857e559c01a962122b3b355c374e6c4e not found: ID does not exist"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.301836 4995 scope.go:117] "RemoveContainer" containerID="b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84"
Jan 20 18:13:02 crc kubenswrapper[4995]: E0120 18:13:02.302229 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84\": container with ID starting with b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84 not found: ID does not exist" containerID="b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84"
Jan 20 18:13:02 crc kubenswrapper[4995]: I0120 18:13:02.302281 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84"} err="failed to get container status \"b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84\": rpc error: code = NotFound desc = could not find container \"b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84\": container with ID starting with b47ad7d9d7d387c490cfe204f2b22cc91ced8d2a3c1f2e15d7d2c0c7e5654f84 not found: ID does not exist"
Jan 20 18:13:03 crc kubenswrapper[4995]: I0120 18:13:03.561564 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bpwxd"]
Jan 20 18:13:03 crc kubenswrapper[4995]: I0120 18:13:03.562538 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bpwxd" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="registry-server" containerID="cri-o://29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1" gracePeriod=2
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.024376 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" path="/var/lib/kubelet/pods/f0f42b9c-c367-42c0-8b22-61a6c07d594d/volumes"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.059773 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpwxd"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.131933 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-catalog-content\") pod \"bc91524b-c181-4954-a940-42089bc9900a\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") "
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.132101 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzh4q\" (UniqueName: \"kubernetes.io/projected/bc91524b-c181-4954-a940-42089bc9900a-kube-api-access-jzh4q\") pod \"bc91524b-c181-4954-a940-42089bc9900a\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") "
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.132219 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-utilities\") pod \"bc91524b-c181-4954-a940-42089bc9900a\" (UID: \"bc91524b-c181-4954-a940-42089bc9900a\") "
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.136358 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-utilities" (OuterVolumeSpecName: "utilities") pod "bc91524b-c181-4954-a940-42089bc9900a" (UID: "bc91524b-c181-4954-a940-42089bc9900a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.140438 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc91524b-c181-4954-a940-42089bc9900a-kube-api-access-jzh4q" (OuterVolumeSpecName: "kube-api-access-jzh4q") pod "bc91524b-c181-4954-a940-42089bc9900a" (UID: "bc91524b-c181-4954-a940-42089bc9900a"). InnerVolumeSpecName "kube-api-access-jzh4q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.194166 4995 generic.go:334] "Generic (PLEG): container finished" podID="bc91524b-c181-4954-a940-42089bc9900a" containerID="29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1" exitCode=0
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.194576 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bpwxd"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.194577 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpwxd" event={"ID":"bc91524b-c181-4954-a940-42089bc9900a","Type":"ContainerDied","Data":"29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1"}
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.194848 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bpwxd" event={"ID":"bc91524b-c181-4954-a940-42089bc9900a","Type":"ContainerDied","Data":"90918fb4a962bd14cee4c4c066cffea5f43f0b46f64a0d65c72e19f7ba002f5f"}
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.194880 4995 scope.go:117] "RemoveContainer" containerID="29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.214794 4995 scope.go:117] "RemoveContainer" containerID="ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.238881 4995 scope.go:117] "RemoveContainer" containerID="dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.239526 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzh4q\" (UniqueName: \"kubernetes.io/projected/bc91524b-c181-4954-a940-42089bc9900a-kube-api-access-jzh4q\") on node \"crc\" DevicePath \"\""
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.239563 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.275697 4995 scope.go:117] "RemoveContainer" containerID="29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1"
Jan 20 18:13:04 crc kubenswrapper[4995]: E0120 18:13:04.276292 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1\": container with ID starting with 29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1 not found: ID does not exist" containerID="29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.276334 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1"} err="failed to get container status \"29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1\": rpc error: code = NotFound desc = could not find container \"29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1\": container with ID starting with 29aae7726a834b23624c24a0557f29b978f35180fbeb4bf3f2120058dbb476b1 not found: ID does not exist"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.276361 4995 scope.go:117] "RemoveContainer" containerID="ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151"
Jan 20 18:13:04 crc kubenswrapper[4995]: E0120 18:13:04.276798 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151\": container with ID starting with ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151 not found: ID does not exist" containerID="ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.276826 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151"} err="failed to get container status \"ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151\": rpc error: code = NotFound desc = could not find container \"ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151\": container with ID starting with ca4bc9794ec46b8403968f36d9aefb42c4b95843b3abf0154fe1e726e7c0c151 not found: ID does not exist"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.276842 4995 scope.go:117] "RemoveContainer" containerID="dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647"
Jan 20 18:13:04 crc kubenswrapper[4995]: E0120 18:13:04.278343 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647\": container with ID starting with dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647 not found: ID does not exist" containerID="dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.278374 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647"} err="failed to get container status \"dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647\": rpc error: code = NotFound desc = could not find container \"dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647\": container with ID starting with dd743cfedf66adaf408766bb9e6eb3677874290d412ed02b83ee959e69d46647 not found: ID does not exist"
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.288559 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bc91524b-c181-4954-a940-42089bc9900a" (UID: "bc91524b-c181-4954-a940-42089bc9900a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.341271 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bc91524b-c181-4954-a940-42089bc9900a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.539827 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bpwxd"]
Jan 20 18:13:04 crc kubenswrapper[4995]: I0120 18:13:04.553586 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bpwxd"]
Jan 20 18:13:06 crc kubenswrapper[4995]: I0120 18:13:06.005412 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc91524b-c181-4954-a940-42089bc9900a" path="/var/lib/kubelet/pods/bc91524b-c181-4954-a940-42089bc9900a/volumes"
Jan 20 18:13:30 crc kubenswrapper[4995]: I0120 18:13:30.572298 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 18:13:30 crc kubenswrapper[4995]: I0120 18:13:30.573296 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 18:14:00 crc kubenswrapper[4995]: I0120 18:14:00.571504 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 18:14:00 crc kubenswrapper[4995]: I0120 18:14:00.574483 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.665872 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cll4d"]
Jan 20 18:14:28 crc kubenswrapper[4995]: E0120 18:14:28.669509 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="registry-server"
Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669529 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="registry-server"
Jan 20 18:14:28 crc kubenswrapper[4995]: E0120 18:14:28.669560 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="extract-content"
Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669569 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="extract-content"
Jan 20 18:14:28 crc kubenswrapper[4995]: E0120 18:14:28.669594 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="extract-content"
containerName="extract-content" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669607 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="extract-content" Jan 20 18:14:28 crc kubenswrapper[4995]: E0120 18:14:28.669633 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="extract-utilities" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669643 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="extract-utilities" Jan 20 18:14:28 crc kubenswrapper[4995]: E0120 18:14:28.669658 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="registry-server" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669667 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="registry-server" Jan 20 18:14:28 crc kubenswrapper[4995]: E0120 18:14:28.669693 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="extract-utilities" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669701 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="extract-utilities" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669981 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0f42b9c-c367-42c0-8b22-61a6c07d594d" containerName="registry-server" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.669995 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc91524b-c181-4954-a940-42089bc9900a" containerName="registry-server" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.671834 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.686568 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cll4d"] Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.732476 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-utilities\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.732563 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-catalog-content\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.732604 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djgng\" (UniqueName: \"kubernetes.io/projected/ab031ecc-ed6e-46c3-819c-258e8ff17269-kube-api-access-djgng\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.834650 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-catalog-content\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.834726 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djgng\" (UniqueName: \"kubernetes.io/projected/ab031ecc-ed6e-46c3-819c-258e8ff17269-kube-api-access-djgng\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.834872 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-utilities\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.835307 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-catalog-content\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.835321 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-utilities\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.867492 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-djgng\" (UniqueName: \"kubernetes.io/projected/ab031ecc-ed6e-46c3-819c-258e8ff17269-kube-api-access-djgng\") pod \"community-operators-cll4d\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:28 crc kubenswrapper[4995]: I0120 18:14:28.996816 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:29 crc kubenswrapper[4995]: I0120 18:14:29.570802 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cll4d"] Jan 20 18:14:29 crc kubenswrapper[4995]: W0120 18:14:29.592524 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab031ecc_ed6e_46c3_819c_258e8ff17269.slice/crio-73bfe0918f8603d8a7ef3f7d0ab5ad13431e29e863507001f28aec5c7dd3f281 WatchSource:0}: Error finding container 73bfe0918f8603d8a7ef3f7d0ab5ad13431e29e863507001f28aec5c7dd3f281: Status 404 returned error can't find the container with id 73bfe0918f8603d8a7ef3f7d0ab5ad13431e29e863507001f28aec5c7dd3f281 Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.200955 4995 generic.go:334] "Generic (PLEG): container finished" podID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerID="e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486" exitCode=0 Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.201266 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerDied","Data":"e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486"} Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.201297 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerStarted","Data":"73bfe0918f8603d8a7ef3f7d0ab5ad13431e29e863507001f28aec5c7dd3f281"} Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.571770 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.572143 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.572182 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 18:14:30.572777 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3c2f41106cf47e0bd532ff85b56553c1306f7fe3267fd9b4798de41d2bd73ad3"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:14:30 crc kubenswrapper[4995]: I0120 
18:14:30.572843 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://3c2f41106cf47e0bd532ff85b56553c1306f7fe3267fd9b4798de41d2bd73ad3" gracePeriod=600 Jan 20 18:14:31 crc kubenswrapper[4995]: I0120 18:14:31.222591 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="3c2f41106cf47e0bd532ff85b56553c1306f7fe3267fd9b4798de41d2bd73ad3" exitCode=0 Jan 20 18:14:31 crc kubenswrapper[4995]: I0120 18:14:31.222655 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"3c2f41106cf47e0bd532ff85b56553c1306f7fe3267fd9b4798de41d2bd73ad3"} Jan 20 18:14:31 crc kubenswrapper[4995]: I0120 18:14:31.223298 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc"} Jan 20 18:14:31 crc kubenswrapper[4995]: I0120 18:14:31.223336 4995 scope.go:117] "RemoveContainer" containerID="e4c46c7cf120a2ced92b817d42e95befe2931b3664cf00a1e000e9d20aaf7b17" Jan 20 18:14:31 crc kubenswrapper[4995]: I0120 18:14:31.235772 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerStarted","Data":"ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2"} Jan 20 18:14:32 crc kubenswrapper[4995]: I0120 18:14:32.261258 4995 generic.go:334] "Generic (PLEG): container finished" podID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerID="ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2" exitCode=0 Jan 20 18:14:32 crc kubenswrapper[4995]: I0120 18:14:32.261459 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerDied","Data":"ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2"} Jan 20 18:14:33 crc kubenswrapper[4995]: I0120 18:14:33.274807 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerStarted","Data":"d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1"} Jan 20 18:14:33 crc kubenswrapper[4995]: I0120 18:14:33.305841 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cll4d" podStartSLOduration=2.759368722 podStartE2EDuration="5.305821725s" podCreationTimestamp="2026-01-20 18:14:28 +0000 UTC" firstStartedPulling="2026-01-20 18:14:30.203502606 +0000 UTC m=+6188.448107412" lastFinishedPulling="2026-01-20 18:14:32.749955589 +0000 UTC m=+6190.994560415" observedRunningTime="2026-01-20 18:14:33.296969485 +0000 UTC m=+6191.541574331" watchObservedRunningTime="2026-01-20 18:14:33.305821725 +0000 UTC m=+6191.550426531" Jan 20 18:14:38 crc kubenswrapper[4995]: I0120 18:14:38.997315 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:38 crc kubenswrapper[4995]: I0120 
18:14:38.998246 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:39 crc kubenswrapper[4995]: I0120 18:14:39.097146 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:39 crc kubenswrapper[4995]: I0120 18:14:39.414247 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:39 crc kubenswrapper[4995]: I0120 18:14:39.475910 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cll4d"] Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.371037 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cll4d" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="registry-server" containerID="cri-o://d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1" gracePeriod=2 Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.856850 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.938929 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-utilities\") pod \"ab031ecc-ed6e-46c3-819c-258e8ff17269\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.940247 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-catalog-content\") pod \"ab031ecc-ed6e-46c3-819c-258e8ff17269\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.940454 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djgng\" (UniqueName: \"kubernetes.io/projected/ab031ecc-ed6e-46c3-819c-258e8ff17269-kube-api-access-djgng\") pod \"ab031ecc-ed6e-46c3-819c-258e8ff17269\" (UID: \"ab031ecc-ed6e-46c3-819c-258e8ff17269\") " Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.942225 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-utilities" (OuterVolumeSpecName: "utilities") pod "ab031ecc-ed6e-46c3-819c-258e8ff17269" (UID: "ab031ecc-ed6e-46c3-819c-258e8ff17269"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:14:41 crc kubenswrapper[4995]: I0120 18:14:41.950822 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab031ecc-ed6e-46c3-819c-258e8ff17269-kube-api-access-djgng" (OuterVolumeSpecName: "kube-api-access-djgng") pod "ab031ecc-ed6e-46c3-819c-258e8ff17269" (UID: "ab031ecc-ed6e-46c3-819c-258e8ff17269"). InnerVolumeSpecName "kube-api-access-djgng". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.043980 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab031ecc-ed6e-46c3-819c-258e8ff17269" (UID: "ab031ecc-ed6e-46c3-819c-258e8ff17269"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.047002 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.047044 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab031ecc-ed6e-46c3-819c-258e8ff17269-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.047068 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djgng\" (UniqueName: \"kubernetes.io/projected/ab031ecc-ed6e-46c3-819c-258e8ff17269-kube-api-access-djgng\") on node \"crc\" DevicePath \"\"" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.394438 4995 generic.go:334] "Generic (PLEG): container finished" podID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerID="d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1" exitCode=0 Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.394546 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cll4d" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.394781 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerDied","Data":"d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1"} Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.394854 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cll4d" event={"ID":"ab031ecc-ed6e-46c3-819c-258e8ff17269","Type":"ContainerDied","Data":"73bfe0918f8603d8a7ef3f7d0ab5ad13431e29e863507001f28aec5c7dd3f281"} Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.394884 4995 scope.go:117] "RemoveContainer" containerID="d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.447135 4995 scope.go:117] "RemoveContainer" containerID="ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.451143 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cll4d"] Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.461755 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cll4d"] Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.474214 4995 scope.go:117] "RemoveContainer" containerID="e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.530680 4995 scope.go:117] "RemoveContainer" containerID="d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1" Jan 20 18:14:42 crc kubenswrapper[4995]: E0120 18:14:42.531269 4995 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1\": container with ID starting with d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1 not found: ID does not exist" containerID="d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.531326 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1"} err="failed to get container status \"d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1\": rpc error: code = NotFound desc = could not find container \"d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1\": container with ID starting with d848f57062faa7e49d1b0efaa3a4ce762c1adab22a08081824da715ade95dde1 not found: ID does not exist" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.531354 4995 scope.go:117] "RemoveContainer" containerID="ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2" Jan 20 18:14:42 crc kubenswrapper[4995]: E0120 18:14:42.531776 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2\": container with ID starting with ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2 not found: ID does not exist" containerID="ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.531807 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2"} err="failed to get container status \"ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2\": rpc error: code = NotFound desc = could not find container \"ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2\": container with ID starting with ed590e909dd039149db652f594a8db2eb59f33f99b2667f8c0f607587643f1c2 not found: ID does not exist" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.531828 4995 scope.go:117] "RemoveContainer" containerID="e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486" Jan 20 18:14:42 crc kubenswrapper[4995]: E0120 18:14:42.532201 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486\": container with ID starting with e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486 not found: ID does not exist" containerID="e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486" Jan 20 18:14:42 crc kubenswrapper[4995]: I0120 18:14:42.532224 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486"} err="failed to get container status \"e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486\": rpc error: code = NotFound desc = could not find container \"e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486\": container with ID starting with e01a9b78a7ec2e4130a2b4c8175664f9d1057a06b979376c9bc1ead364f1b486 not found: ID does not exist" Jan 20 18:14:44 crc kubenswrapper[4995]: I0120 18:14:44.004645 4995 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" path="/var/lib/kubelet/pods/ab031ecc-ed6e-46c3-819c-258e8ff17269/volumes" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.158675 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh"] Jan 20 18:15:00 crc kubenswrapper[4995]: E0120 18:15:00.159606 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="registry-server" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.159620 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="registry-server" Jan 20 18:15:00 crc kubenswrapper[4995]: E0120 18:15:00.159641 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="extract-utilities" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.159648 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="extract-utilities" Jan 20 18:15:00 crc kubenswrapper[4995]: E0120 18:15:00.159660 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="extract-content" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.159665 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="extract-content" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.159871 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab031ecc-ed6e-46c3-819c-258e8ff17269" containerName="registry-server" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.160608 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.162888 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.162896 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.174663 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh"] Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.255743 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53063733-d6a3-485d-8fd7-954a15717d3b-secret-volume\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.256048 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzxks\" (UniqueName: \"kubernetes.io/projected/53063733-d6a3-485d-8fd7-954a15717d3b-kube-api-access-vzxks\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.256337 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53063733-d6a3-485d-8fd7-954a15717d3b-config-volume\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.358392 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzxks\" (UniqueName: \"kubernetes.io/projected/53063733-d6a3-485d-8fd7-954a15717d3b-kube-api-access-vzxks\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.358490 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53063733-d6a3-485d-8fd7-954a15717d3b-config-volume\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.358636 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53063733-d6a3-485d-8fd7-954a15717d3b-secret-volume\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.359694 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53063733-d6a3-485d-8fd7-954a15717d3b-config-volume\") pod 
\"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.370756 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53063733-d6a3-485d-8fd7-954a15717d3b-secret-volume\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.400625 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzxks\" (UniqueName: \"kubernetes.io/projected/53063733-d6a3-485d-8fd7-954a15717d3b-kube-api-access-vzxks\") pod \"collect-profiles-29482215-w96jh\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.507756 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:00 crc kubenswrapper[4995]: I0120 18:15:00.975163 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh"] Jan 20 18:15:01 crc kubenswrapper[4995]: I0120 18:15:01.656229 4995 generic.go:334] "Generic (PLEG): container finished" podID="53063733-d6a3-485d-8fd7-954a15717d3b" containerID="77dc5cb54bb5c57d09ff0c9efcc9e84a45b00214f1b2f0bff3d02613d50dc874" exitCode=0 Jan 20 18:15:01 crc kubenswrapper[4995]: I0120 18:15:01.656473 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" event={"ID":"53063733-d6a3-485d-8fd7-954a15717d3b","Type":"ContainerDied","Data":"77dc5cb54bb5c57d09ff0c9efcc9e84a45b00214f1b2f0bff3d02613d50dc874"} Jan 20 18:15:01 crc kubenswrapper[4995]: I0120 18:15:01.656657 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" event={"ID":"53063733-d6a3-485d-8fd7-954a15717d3b","Type":"ContainerStarted","Data":"d9e52d75720ffe87c0f4399b721e5a3551be2c7ce2cd6153e81f151036b1731b"} Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.082881 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.115625 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53063733-d6a3-485d-8fd7-954a15717d3b-secret-volume\") pod \"53063733-d6a3-485d-8fd7-954a15717d3b\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.115677 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53063733-d6a3-485d-8fd7-954a15717d3b-config-volume\") pod \"53063733-d6a3-485d-8fd7-954a15717d3b\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.115821 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vzxks\" (UniqueName: \"kubernetes.io/projected/53063733-d6a3-485d-8fd7-954a15717d3b-kube-api-access-vzxks\") pod \"53063733-d6a3-485d-8fd7-954a15717d3b\" (UID: \"53063733-d6a3-485d-8fd7-954a15717d3b\") " Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.116598 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53063733-d6a3-485d-8fd7-954a15717d3b-config-volume" (OuterVolumeSpecName: "config-volume") pod "53063733-d6a3-485d-8fd7-954a15717d3b" (UID: "53063733-d6a3-485d-8fd7-954a15717d3b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.127248 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53063733-d6a3-485d-8fd7-954a15717d3b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "53063733-d6a3-485d-8fd7-954a15717d3b" (UID: "53063733-d6a3-485d-8fd7-954a15717d3b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.132572 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53063733-d6a3-485d-8fd7-954a15717d3b-kube-api-access-vzxks" (OuterVolumeSpecName: "kube-api-access-vzxks") pod "53063733-d6a3-485d-8fd7-954a15717d3b" (UID: "53063733-d6a3-485d-8fd7-954a15717d3b"). InnerVolumeSpecName "kube-api-access-vzxks". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.217949 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vzxks\" (UniqueName: \"kubernetes.io/projected/53063733-d6a3-485d-8fd7-954a15717d3b-kube-api-access-vzxks\") on node \"crc\" DevicePath \"\"" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.217992 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53063733-d6a3-485d-8fd7-954a15717d3b-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.218005 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53063733-d6a3-485d-8fd7-954a15717d3b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.690767 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" event={"ID":"53063733-d6a3-485d-8fd7-954a15717d3b","Type":"ContainerDied","Data":"d9e52d75720ffe87c0f4399b721e5a3551be2c7ce2cd6153e81f151036b1731b"} Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.691057 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9e52d75720ffe87c0f4399b721e5a3551be2c7ce2cd6153e81f151036b1731b" Jan 20 18:15:03 crc kubenswrapper[4995]: I0120 18:15:03.690889 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh" Jan 20 18:15:04 crc kubenswrapper[4995]: I0120 18:15:04.202368 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"] Jan 20 18:15:04 crc kubenswrapper[4995]: I0120 18:15:04.211095 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482170-5wfqn"] Jan 20 18:15:06 crc kubenswrapper[4995]: I0120 18:15:06.008258 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="820a1e7e-7454-46fe-8b07-3d9074b53c22" path="/var/lib/kubelet/pods/820a1e7e-7454-46fe-8b07-3d9074b53c22/volumes" Jan 20 18:15:35 crc kubenswrapper[4995]: I0120 18:15:35.983757 4995 scope.go:117] "RemoveContainer" containerID="ad66521b9ded8aab52f28247fc2be0d95c8ba864b4aaad893a4ea860e26ad68c" Jan 20 18:16:30 crc kubenswrapper[4995]: I0120 18:16:30.571936 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:16:30 crc kubenswrapper[4995]: I0120 18:16:30.572745 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:17:00 crc kubenswrapper[4995]: I0120 18:17:00.571541 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 20 18:17:00 crc kubenswrapper[4995]: I0120 18:17:00.572341 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.571778 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.572375 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.572422 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.573359 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.573433 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" gracePeriod=600 Jan 20 18:17:30 crc kubenswrapper[4995]: E0120 18:17:30.718350 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.867645 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" exitCode=0 Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.867691 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc"} Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.867729 4995 scope.go:117] "RemoveContainer" containerID="3c2f41106cf47e0bd532ff85b56553c1306f7fe3267fd9b4798de41d2bd73ad3" Jan 20 18:17:30 crc kubenswrapper[4995]: I0120 18:17:30.868123 4995 scope.go:117] "RemoveContainer" 
containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:17:30 crc kubenswrapper[4995]: E0120 18:17:30.868409 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:17:42 crc kubenswrapper[4995]: I0120 18:17:42.990294 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:17:42 crc kubenswrapper[4995]: E0120 18:17:42.991592 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:17:54 crc kubenswrapper[4995]: I0120 18:17:54.989883 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:17:54 crc kubenswrapper[4995]: E0120 18:17:54.990604 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:18:07 crc kubenswrapper[4995]: I0120 18:18:07.991007 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:18:07 crc kubenswrapper[4995]: E0120 18:18:07.992139 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:18:20 crc kubenswrapper[4995]: I0120 18:18:20.990284 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:18:20 crc kubenswrapper[4995]: E0120 18:18:20.991998 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:18:32 crc kubenswrapper[4995]: I0120 18:18:32.000874 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:18:32 crc kubenswrapper[4995]: E0120 18:18:32.001574 4995 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:18:46 crc kubenswrapper[4995]: I0120 18:18:46.992758 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:18:46 crc kubenswrapper[4995]: E0120 18:18:46.998271 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:19:00 crc kubenswrapper[4995]: I0120 18:19:00.989849 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:19:00 crc kubenswrapper[4995]: E0120 18:19:00.991139 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:19:14 crc kubenswrapper[4995]: I0120 18:19:14.989626 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:19:14 crc kubenswrapper[4995]: E0120 18:19:14.991884 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:19:27 crc kubenswrapper[4995]: I0120 18:19:27.990257 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:19:27 crc kubenswrapper[4995]: E0120 18:19:27.991101 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:19:40 crc kubenswrapper[4995]: I0120 18:19:40.990049 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:19:40 crc kubenswrapper[4995]: E0120 18:19:40.991824 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:19:52 crc kubenswrapper[4995]: I0120 18:19:52.004228 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:19:52 crc kubenswrapper[4995]: E0120 18:19:52.005591 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:20:02 crc kubenswrapper[4995]: I0120 18:20:02.989700 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:20:02 crc kubenswrapper[4995]: E0120 18:20:02.990582 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:20:16 crc kubenswrapper[4995]: I0120 18:20:16.989810 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:20:16 crc kubenswrapper[4995]: E0120 18:20:16.990583 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:20:27 crc kubenswrapper[4995]: I0120 18:20:27.990607 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:20:27 crc kubenswrapper[4995]: E0120 18:20:27.991893 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:20:40 crc kubenswrapper[4995]: I0120 18:20:40.989768 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:20:40 crc kubenswrapper[4995]: E0120 18:20:40.990630 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:20:53 crc kubenswrapper[4995]: I0120 18:20:53.991290 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:20:53 crc kubenswrapper[4995]: E0120 18:20:53.992205 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:21:05 crc kubenswrapper[4995]: I0120 18:21:05.259179 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:21:05 crc kubenswrapper[4995]: E0120 18:21:05.260135 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.600998 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zcnbk"] Jan 20 18:21:11 crc kubenswrapper[4995]: E0120 18:21:11.602174 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53063733-d6a3-485d-8fd7-954a15717d3b" containerName="collect-profiles" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.602190 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="53063733-d6a3-485d-8fd7-954a15717d3b" containerName="collect-profiles" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.602431 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="53063733-d6a3-485d-8fd7-954a15717d3b" containerName="collect-profiles" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.604100 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.618968 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zcnbk"] Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.662823 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhxvr\" (UniqueName: \"kubernetes.io/projected/a5c9e454-341e-4889-b3a5-4cf8d8052302-kube-api-access-lhxvr\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.662927 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-utilities\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.662975 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-catalog-content\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.765213 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-catalog-content\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.765367 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhxvr\" (UniqueName: \"kubernetes.io/projected/a5c9e454-341e-4889-b3a5-4cf8d8052302-kube-api-access-lhxvr\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.765458 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-utilities\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.766043 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-utilities\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.766162 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-catalog-content\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.797721 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lhxvr\" (UniqueName: \"kubernetes.io/projected/a5c9e454-341e-4889-b3a5-4cf8d8052302-kube-api-access-lhxvr\") pod \"certified-operators-zcnbk\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:11 crc kubenswrapper[4995]: I0120 18:21:11.939711 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:12 crc kubenswrapper[4995]: I0120 18:21:12.544474 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zcnbk"] Jan 20 18:21:12 crc kubenswrapper[4995]: W0120 18:21:12.546154 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5c9e454_341e_4889_b3a5_4cf8d8052302.slice/crio-2ed7a79f926181107b7f483e3deadc9da3ae0ca62c84a994fb803a0778aa475c WatchSource:0}: Error finding container 2ed7a79f926181107b7f483e3deadc9da3ae0ca62c84a994fb803a0778aa475c: Status 404 returned error can't find the container with id 2ed7a79f926181107b7f483e3deadc9da3ae0ca62c84a994fb803a0778aa475c Jan 20 18:21:13 crc kubenswrapper[4995]: I0120 18:21:13.444722 4995 generic.go:334] "Generic (PLEG): container finished" podID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerID="6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13" exitCode=0 Jan 20 18:21:13 crc kubenswrapper[4995]: I0120 18:21:13.444803 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerDied","Data":"6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13"} Jan 20 18:21:13 crc kubenswrapper[4995]: I0120 18:21:13.445045 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerStarted","Data":"2ed7a79f926181107b7f483e3deadc9da3ae0ca62c84a994fb803a0778aa475c"} Jan 20 18:21:13 crc kubenswrapper[4995]: I0120 18:21:13.446818 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 18:21:14 crc kubenswrapper[4995]: I0120 18:21:14.479284 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerStarted","Data":"18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e"} Jan 20 18:21:15 crc kubenswrapper[4995]: I0120 18:21:15.493045 4995 generic.go:334] "Generic (PLEG): container finished" podID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerID="18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e" exitCode=0 Jan 20 18:21:15 crc kubenswrapper[4995]: I0120 18:21:15.493165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerDied","Data":"18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e"} Jan 20 18:21:16 crc kubenswrapper[4995]: I0120 18:21:16.503505 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerStarted","Data":"5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db"} Jan 20 18:21:16 crc kubenswrapper[4995]: I0120 
18:21:16.527112 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zcnbk" podStartSLOduration=3.007512011 podStartE2EDuration="5.527093172s" podCreationTimestamp="2026-01-20 18:21:11 +0000 UTC" firstStartedPulling="2026-01-20 18:21:13.446621492 +0000 UTC m=+6591.691226298" lastFinishedPulling="2026-01-20 18:21:15.966202653 +0000 UTC m=+6594.210807459" observedRunningTime="2026-01-20 18:21:16.521730876 +0000 UTC m=+6594.766335692" watchObservedRunningTime="2026-01-20 18:21:16.527093172 +0000 UTC m=+6594.771697968" Jan 20 18:21:17 crc kubenswrapper[4995]: I0120 18:21:17.990184 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:21:17 crc kubenswrapper[4995]: E0120 18:21:17.990969 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:21:21 crc kubenswrapper[4995]: I0120 18:21:21.940569 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:21 crc kubenswrapper[4995]: I0120 18:21:21.941557 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:22 crc kubenswrapper[4995]: I0120 18:21:22.033677 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:22 crc kubenswrapper[4995]: I0120 18:21:22.645717 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:22 crc kubenswrapper[4995]: I0120 18:21:22.697000 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zcnbk"] Jan 20 18:21:24 crc kubenswrapper[4995]: I0120 18:21:24.632750 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zcnbk" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="registry-server" containerID="cri-o://5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db" gracePeriod=2 Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.149578 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.188715 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-catalog-content\") pod \"a5c9e454-341e-4889-b3a5-4cf8d8052302\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.188961 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhxvr\" (UniqueName: \"kubernetes.io/projected/a5c9e454-341e-4889-b3a5-4cf8d8052302-kube-api-access-lhxvr\") pod \"a5c9e454-341e-4889-b3a5-4cf8d8052302\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.189226 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-utilities\") pod \"a5c9e454-341e-4889-b3a5-4cf8d8052302\" (UID: \"a5c9e454-341e-4889-b3a5-4cf8d8052302\") " Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.193298 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-utilities" (OuterVolumeSpecName: "utilities") pod "a5c9e454-341e-4889-b3a5-4cf8d8052302" (UID: "a5c9e454-341e-4889-b3a5-4cf8d8052302"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.198692 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5c9e454-341e-4889-b3a5-4cf8d8052302-kube-api-access-lhxvr" (OuterVolumeSpecName: "kube-api-access-lhxvr") pod "a5c9e454-341e-4889-b3a5-4cf8d8052302" (UID: "a5c9e454-341e-4889-b3a5-4cf8d8052302"). InnerVolumeSpecName "kube-api-access-lhxvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.292555 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhxvr\" (UniqueName: \"kubernetes.io/projected/a5c9e454-341e-4889-b3a5-4cf8d8052302-kube-api-access-lhxvr\") on node \"crc\" DevicePath \"\"" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.292602 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.350163 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5c9e454-341e-4889-b3a5-4cf8d8052302" (UID: "a5c9e454-341e-4889-b3a5-4cf8d8052302"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.395694 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5c9e454-341e-4889-b3a5-4cf8d8052302-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.642090 4995 generic.go:334] "Generic (PLEG): container finished" podID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerID="5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db" exitCode=0 Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.642132 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerDied","Data":"5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db"} Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.642174 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zcnbk" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.642186 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zcnbk" event={"ID":"a5c9e454-341e-4889-b3a5-4cf8d8052302","Type":"ContainerDied","Data":"2ed7a79f926181107b7f483e3deadc9da3ae0ca62c84a994fb803a0778aa475c"} Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.642205 4995 scope.go:117] "RemoveContainer" containerID="5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.676549 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zcnbk"] Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.677169 4995 scope.go:117] "RemoveContainer" containerID="18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.688884 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zcnbk"] Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.713148 4995 scope.go:117] "RemoveContainer" containerID="6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.747836 4995 scope.go:117] "RemoveContainer" containerID="5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db" Jan 20 18:21:25 crc kubenswrapper[4995]: E0120 18:21:25.748347 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db\": container with ID starting with 5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db not found: ID does not exist" containerID="5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.748381 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db"} err="failed to get container status \"5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db\": rpc error: code = NotFound desc = could not find container \"5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db\": container with ID starting with 5e543556bf32e00ec18b5a7bc75e9365f376b254a7b516258a69d22aedd6d1db not found: ID does not exist" Jan 20 
18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.748402 4995 scope.go:117] "RemoveContainer" containerID="18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e" Jan 20 18:21:25 crc kubenswrapper[4995]: E0120 18:21:25.748858 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e\": container with ID starting with 18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e not found: ID does not exist" containerID="18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.748883 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e"} err="failed to get container status \"18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e\": rpc error: code = NotFound desc = could not find container \"18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e\": container with ID starting with 18641e9f229001226a6f2c417b8a22b9fc43bcefb4647bb163fc41d529678e1e not found: ID does not exist" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.748897 4995 scope.go:117] "RemoveContainer" containerID="6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13" Jan 20 18:21:25 crc kubenswrapper[4995]: E0120 18:21:25.749270 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13\": container with ID starting with 6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13 not found: ID does not exist" containerID="6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13" Jan 20 18:21:25 crc kubenswrapper[4995]: I0120 18:21:25.749324 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13"} err="failed to get container status \"6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13\": rpc error: code = NotFound desc = could not find container \"6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13\": container with ID starting with 6abb2b14ad8e83a1349185e37e036877d07cd0d05789d20c0591f62a7cdaec13 not found: ID does not exist" Jan 20 18:21:26 crc kubenswrapper[4995]: I0120 18:21:26.006835 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" path="/var/lib/kubelet/pods/a5c9e454-341e-4889-b3a5-4cf8d8052302/volumes" Jan 20 18:21:28 crc kubenswrapper[4995]: I0120 18:21:28.990775 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:21:28 crc kubenswrapper[4995]: E0120 18:21:28.991796 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:21:43 crc kubenswrapper[4995]: I0120 18:21:43.991106 4995 scope.go:117] "RemoveContainer" 
containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:21:43 crc kubenswrapper[4995]: E0120 18:21:43.994284 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:21:56 crc kubenswrapper[4995]: I0120 18:21:56.989475 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:21:56 crc kubenswrapper[4995]: E0120 18:21:56.990388 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:22:08 crc kubenswrapper[4995]: I0120 18:22:08.990105 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:22:08 crc kubenswrapper[4995]: E0120 18:22:08.990740 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:22:20 crc kubenswrapper[4995]: I0120 18:22:20.989967 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:22:20 crc kubenswrapper[4995]: E0120 18:22:20.990737 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:22:33 crc kubenswrapper[4995]: I0120 18:22:33.990160 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:22:34 crc kubenswrapper[4995]: I0120 18:22:34.503152 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"6290eb77e0700a720d61585c376f0060aca7c42b33d3f0c28a5f7dae16d0fd08"} Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.574270 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zvszp"] Jan 20 18:23:10 crc kubenswrapper[4995]: E0120 18:23:10.575364 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="registry-server" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.575382 4995 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="registry-server" Jan 20 18:23:10 crc kubenswrapper[4995]: E0120 18:23:10.575422 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="extract-content" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.575430 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="extract-content" Jan 20 18:23:10 crc kubenswrapper[4995]: E0120 18:23:10.575464 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="extract-utilities" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.575472 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="extract-utilities" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.575691 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5c9e454-341e-4889-b3a5-4cf8d8052302" containerName="registry-server" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.577399 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.595407 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvszp"] Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.686637 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-utilities\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.686855 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-catalog-content\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.686985 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blq2d\" (UniqueName: \"kubernetes.io/projected/dfd28a0f-25f4-4a67-b404-46dc07c8661c-kube-api-access-blq2d\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.788763 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-catalog-content\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.788822 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blq2d\" (UniqueName: \"kubernetes.io/projected/dfd28a0f-25f4-4a67-b404-46dc07c8661c-kube-api-access-blq2d\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc 
kubenswrapper[4995]: I0120 18:23:10.788893 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-utilities\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.789288 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-utilities\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.789485 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-catalog-content\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.806974 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blq2d\" (UniqueName: \"kubernetes.io/projected/dfd28a0f-25f4-4a67-b404-46dc07c8661c-kube-api-access-blq2d\") pod \"redhat-marketplace-zvszp\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:10 crc kubenswrapper[4995]: I0120 18:23:10.901060 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:11 crc kubenswrapper[4995]: I0120 18:23:11.437248 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvszp"] Jan 20 18:23:11 crc kubenswrapper[4995]: I0120 18:23:11.975374 4995 generic.go:334] "Generic (PLEG): container finished" podID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerID="b6c6f4de6995d6b6e24c1de6afbb4aa9787b765003f847476f58a06453be02b1" exitCode=0 Jan 20 18:23:11 crc kubenswrapper[4995]: I0120 18:23:11.976069 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvszp" event={"ID":"dfd28a0f-25f4-4a67-b404-46dc07c8661c","Type":"ContainerDied","Data":"b6c6f4de6995d6b6e24c1de6afbb4aa9787b765003f847476f58a06453be02b1"} Jan 20 18:23:11 crc kubenswrapper[4995]: I0120 18:23:11.976177 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvszp" event={"ID":"dfd28a0f-25f4-4a67-b404-46dc07c8661c","Type":"ContainerStarted","Data":"2c675435fe10fe7ab90297be23eec4d5b696a88794fd1338dc4efdaa1eb1b0ac"} Jan 20 18:23:12 crc kubenswrapper[4995]: I0120 18:23:12.987060 4995 generic.go:334] "Generic (PLEG): container finished" podID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerID="4dc72d36799d6eb1e8dd66b06e909edfeb38b7c5789fdd4c44b052cb82f6006a" exitCode=0 Jan 20 18:23:12 crc kubenswrapper[4995]: I0120 18:23:12.987852 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvszp" event={"ID":"dfd28a0f-25f4-4a67-b404-46dc07c8661c","Type":"ContainerDied","Data":"4dc72d36799d6eb1e8dd66b06e909edfeb38b7c5789fdd4c44b052cb82f6006a"} Jan 20 18:23:14 crc kubenswrapper[4995]: I0120 18:23:14.001185 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvszp" 
event={"ID":"dfd28a0f-25f4-4a67-b404-46dc07c8661c","Type":"ContainerStarted","Data":"1e32392e9dd0bb5c0d55211a580455b0c966f9d91954b51ac098e344d2741815"} Jan 20 18:23:14 crc kubenswrapper[4995]: I0120 18:23:14.020838 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zvszp" podStartSLOduration=2.561929469 podStartE2EDuration="4.020819982s" podCreationTimestamp="2026-01-20 18:23:10 +0000 UTC" firstStartedPulling="2026-01-20 18:23:11.980108763 +0000 UTC m=+6710.224713609" lastFinishedPulling="2026-01-20 18:23:13.438999286 +0000 UTC m=+6711.683604122" observedRunningTime="2026-01-20 18:23:14.019929739 +0000 UTC m=+6712.264534545" watchObservedRunningTime="2026-01-20 18:23:14.020819982 +0000 UTC m=+6712.265424788" Jan 20 18:23:20 crc kubenswrapper[4995]: I0120 18:23:20.902219 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:20 crc kubenswrapper[4995]: I0120 18:23:20.902791 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:20 crc kubenswrapper[4995]: I0120 18:23:20.970512 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:21 crc kubenswrapper[4995]: I0120 18:23:21.159736 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:21 crc kubenswrapper[4995]: I0120 18:23:21.243895 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvszp"] Jan 20 18:23:23 crc kubenswrapper[4995]: I0120 18:23:23.104914 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zvszp" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="registry-server" containerID="cri-o://1e32392e9dd0bb5c0d55211a580455b0c966f9d91954b51ac098e344d2741815" gracePeriod=2 Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.123164 4995 generic.go:334] "Generic (PLEG): container finished" podID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerID="1e32392e9dd0bb5c0d55211a580455b0c966f9d91954b51ac098e344d2741815" exitCode=0 Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.123261 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvszp" event={"ID":"dfd28a0f-25f4-4a67-b404-46dc07c8661c","Type":"ContainerDied","Data":"1e32392e9dd0bb5c0d55211a580455b0c966f9d91954b51ac098e344d2741815"} Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.123682 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zvszp" event={"ID":"dfd28a0f-25f4-4a67-b404-46dc07c8661c","Type":"ContainerDied","Data":"2c675435fe10fe7ab90297be23eec4d5b696a88794fd1338dc4efdaa1eb1b0ac"} Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.123702 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c675435fe10fe7ab90297be23eec4d5b696a88794fd1338dc4efdaa1eb1b0ac" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.180847 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.288483 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-catalog-content\") pod \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.288649 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-utilities\") pod \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.288860 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blq2d\" (UniqueName: \"kubernetes.io/projected/dfd28a0f-25f4-4a67-b404-46dc07c8661c-kube-api-access-blq2d\") pod \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\" (UID: \"dfd28a0f-25f4-4a67-b404-46dc07c8661c\") " Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.289730 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-utilities" (OuterVolumeSpecName: "utilities") pod "dfd28a0f-25f4-4a67-b404-46dc07c8661c" (UID: "dfd28a0f-25f4-4a67-b404-46dc07c8661c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.298061 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfd28a0f-25f4-4a67-b404-46dc07c8661c-kube-api-access-blq2d" (OuterVolumeSpecName: "kube-api-access-blq2d") pod "dfd28a0f-25f4-4a67-b404-46dc07c8661c" (UID: "dfd28a0f-25f4-4a67-b404-46dc07c8661c"). InnerVolumeSpecName "kube-api-access-blq2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.312711 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dfd28a0f-25f4-4a67-b404-46dc07c8661c" (UID: "dfd28a0f-25f4-4a67-b404-46dc07c8661c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.391464 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.391492 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dfd28a0f-25f4-4a67-b404-46dc07c8661c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:23:24 crc kubenswrapper[4995]: I0120 18:23:24.391502 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blq2d\" (UniqueName: \"kubernetes.io/projected/dfd28a0f-25f4-4a67-b404-46dc07c8661c-kube-api-access-blq2d\") on node \"crc\" DevicePath \"\"" Jan 20 18:23:25 crc kubenswrapper[4995]: I0120 18:23:25.139543 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zvszp" Jan 20 18:23:25 crc kubenswrapper[4995]: I0120 18:23:25.206999 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvszp"] Jan 20 18:23:25 crc kubenswrapper[4995]: I0120 18:23:25.249499 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zvszp"] Jan 20 18:23:26 crc kubenswrapper[4995]: I0120 18:23:26.016842 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" path="/var/lib/kubelet/pods/dfd28a0f-25f4-4a67-b404-46dc07c8661c/volumes" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.958675 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wsp5w"] Jan 20 18:24:01 crc kubenswrapper[4995]: E0120 18:24:01.959755 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="registry-server" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.959771 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="registry-server" Jan 20 18:24:01 crc kubenswrapper[4995]: E0120 18:24:01.959798 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="extract-content" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.959808 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="extract-content" Jan 20 18:24:01 crc kubenswrapper[4995]: E0120 18:24:01.959851 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="extract-utilities" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.959860 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="extract-utilities" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.960160 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfd28a0f-25f4-4a67-b404-46dc07c8661c" containerName="registry-server" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.965026 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.986875 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wsp5w"] Jan 20 18:24:01 crc kubenswrapper[4995]: I0120 18:24:01.999468 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-utilities\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:01.999722 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-catalog-content\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:01.999888 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6vgj\" (UniqueName: \"kubernetes.io/projected/6aaa69d1-2180-4312-95ce-b5ab5480d934-kube-api-access-z6vgj\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.102397 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-catalog-content\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.102531 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6vgj\" (UniqueName: \"kubernetes.io/projected/6aaa69d1-2180-4312-95ce-b5ab5480d934-kube-api-access-z6vgj\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.102601 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-utilities\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.103195 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-utilities\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.103395 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-catalog-content\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.129744 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-z6vgj\" (UniqueName: \"kubernetes.io/projected/6aaa69d1-2180-4312-95ce-b5ab5480d934-kube-api-access-z6vgj\") pod \"redhat-operators-wsp5w\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.314948 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:02 crc kubenswrapper[4995]: I0120 18:24:02.913816 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wsp5w"] Jan 20 18:24:02 crc kubenswrapper[4995]: W0120 18:24:02.933605 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6aaa69d1_2180_4312_95ce_b5ab5480d934.slice/crio-999b09c161abd2d018b82928aa95fdc2841742f202ed91cac293085f34bcaf68 WatchSource:0}: Error finding container 999b09c161abd2d018b82928aa95fdc2841742f202ed91cac293085f34bcaf68: Status 404 returned error can't find the container with id 999b09c161abd2d018b82928aa95fdc2841742f202ed91cac293085f34bcaf68 Jan 20 18:24:03 crc kubenswrapper[4995]: I0120 18:24:03.905754 4995 generic.go:334] "Generic (PLEG): container finished" podID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerID="dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe" exitCode=0 Jan 20 18:24:03 crc kubenswrapper[4995]: I0120 18:24:03.905949 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerDied","Data":"dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe"} Jan 20 18:24:03 crc kubenswrapper[4995]: I0120 18:24:03.906046 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerStarted","Data":"999b09c161abd2d018b82928aa95fdc2841742f202ed91cac293085f34bcaf68"} Jan 20 18:24:05 crc kubenswrapper[4995]: I0120 18:24:05.924979 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerStarted","Data":"0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a"} Jan 20 18:24:08 crc kubenswrapper[4995]: I0120 18:24:08.960910 4995 generic.go:334] "Generic (PLEG): container finished" podID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerID="0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a" exitCode=0 Jan 20 18:24:08 crc kubenswrapper[4995]: I0120 18:24:08.961016 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerDied","Data":"0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a"} Jan 20 18:24:09 crc kubenswrapper[4995]: I0120 18:24:09.979122 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerStarted","Data":"8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392"} Jan 20 18:24:10 crc kubenswrapper[4995]: I0120 18:24:10.016945 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wsp5w" podStartSLOduration=3.31765662 podStartE2EDuration="9.016923404s" podCreationTimestamp="2026-01-20 
18:24:01 +0000 UTC" firstStartedPulling="2026-01-20 18:24:03.90892646 +0000 UTC m=+6762.153531266" lastFinishedPulling="2026-01-20 18:24:09.608193204 +0000 UTC m=+6767.852798050" observedRunningTime="2026-01-20 18:24:10.0038862 +0000 UTC m=+6768.248491046" watchObservedRunningTime="2026-01-20 18:24:10.016923404 +0000 UTC m=+6768.261528210" Jan 20 18:24:12 crc kubenswrapper[4995]: I0120 18:24:12.315912 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:12 crc kubenswrapper[4995]: I0120 18:24:12.317466 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:13 crc kubenswrapper[4995]: I0120 18:24:13.397206 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wsp5w" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="registry-server" probeResult="failure" output=< Jan 20 18:24:13 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 18:24:13 crc kubenswrapper[4995]: > Jan 20 18:24:22 crc kubenswrapper[4995]: I0120 18:24:22.380533 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:22 crc kubenswrapper[4995]: I0120 18:24:22.456133 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.313505 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wsp5w"] Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.314262 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wsp5w" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="registry-server" containerID="cri-o://8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392" gracePeriod=2 Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.758306 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.913839 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6vgj\" (UniqueName: \"kubernetes.io/projected/6aaa69d1-2180-4312-95ce-b5ab5480d934-kube-api-access-z6vgj\") pod \"6aaa69d1-2180-4312-95ce-b5ab5480d934\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.914140 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-catalog-content\") pod \"6aaa69d1-2180-4312-95ce-b5ab5480d934\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.914297 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-utilities\") pod \"6aaa69d1-2180-4312-95ce-b5ab5480d934\" (UID: \"6aaa69d1-2180-4312-95ce-b5ab5480d934\") " Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.922422 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6aaa69d1-2180-4312-95ce-b5ab5480d934-kube-api-access-z6vgj" (OuterVolumeSpecName: "kube-api-access-z6vgj") pod "6aaa69d1-2180-4312-95ce-b5ab5480d934" (UID: "6aaa69d1-2180-4312-95ce-b5ab5480d934"). InnerVolumeSpecName "kube-api-access-z6vgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:24:24 crc kubenswrapper[4995]: I0120 18:24:24.922835 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-utilities" (OuterVolumeSpecName: "utilities") pod "6aaa69d1-2180-4312-95ce-b5ab5480d934" (UID: "6aaa69d1-2180-4312-95ce-b5ab5480d934"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.017835 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.017888 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6vgj\" (UniqueName: \"kubernetes.io/projected/6aaa69d1-2180-4312-95ce-b5ab5480d934-kube-api-access-z6vgj\") on node \"crc\" DevicePath \"\"" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.043409 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6aaa69d1-2180-4312-95ce-b5ab5480d934" (UID: "6aaa69d1-2180-4312-95ce-b5ab5480d934"). InnerVolumeSpecName "catalog-content". 
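
[annotation] The UnmountVolume / "Volume detached" pairs above follow a two-phase reconcile: a volume still mounted in the actual state but absent from the desired state is torn down first, and only a successful teardown is recorded as detached (catalog-content, which holds the extracted index data, completes last). A minimal sketch of that pattern, with illustrative types rather than kubelet's:

```go
package main

import "fmt"

type volumeState struct {
	mounted map[string]bool // actual state of world
}

// reconcile unmounts every volume that is mounted but no longer desired,
// marking it detached only after TearDown succeeds.
func (s *volumeState) reconcile(desired map[string]bool, tearDown func(string) error) {
	for vol := range s.mounted {
		if desired[vol] {
			continue
		}
		fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
		if err := tearDown(vol); err != nil {
			fmt.Printf("UnmountVolume.TearDown failed for %q: %v\n", vol, err)
			continue // retried on the next reconcile pass
		}
		delete(s.mounted, vol)
		fmt.Printf("Volume detached for volume %q\n", vol)
	}
}

func main() {
	s := &volumeState{mounted: map[string]bool{
		"utilities": true, "catalog-content": true, "kube-api-access-z6vgj": true,
	}}
	s.reconcile(map[string]bool{}, func(string) error { return nil })
}
```
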
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.119884 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6aaa69d1-2180-4312-95ce-b5ab5480d934-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.129228 4995 generic.go:334] "Generic (PLEG): container finished" podID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerID="8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392" exitCode=0 Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.129268 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerDied","Data":"8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392"} Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.129292 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wsp5w" event={"ID":"6aaa69d1-2180-4312-95ce-b5ab5480d934","Type":"ContainerDied","Data":"999b09c161abd2d018b82928aa95fdc2841742f202ed91cac293085f34bcaf68"} Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.129293 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wsp5w" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.129307 4995 scope.go:117] "RemoveContainer" containerID="8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.164261 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wsp5w"] Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.172505 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wsp5w"] Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.174261 4995 scope.go:117] "RemoveContainer" containerID="0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.200369 4995 scope.go:117] "RemoveContainer" containerID="dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.243359 4995 scope.go:117] "RemoveContainer" containerID="8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392" Jan 20 18:24:25 crc kubenswrapper[4995]: E0120 18:24:25.244186 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392\": container with ID starting with 8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392 not found: ID does not exist" containerID="8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.244257 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392"} err="failed to get container status \"8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392\": rpc error: code = NotFound desc = could not find container \"8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392\": container with ID starting with 8308de74bd0080cd3b373160e2f9d79c8687f350c8b3c87c6aa65e8e20fbe392 not found: ID does not exist" Jan 20 18:24:25 crc 
kubenswrapper[4995]: I0120 18:24:25.244292 4995 scope.go:117] "RemoveContainer" containerID="0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a" Jan 20 18:24:25 crc kubenswrapper[4995]: E0120 18:24:25.244864 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a\": container with ID starting with 0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a not found: ID does not exist" containerID="0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.244936 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a"} err="failed to get container status \"0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a\": rpc error: code = NotFound desc = could not find container \"0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a\": container with ID starting with 0096f2e21b6f0cd767bee5bfa7234ffe4b10f4d510306c7377bebde33e1d6b7a not found: ID does not exist" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.244989 4995 scope.go:117] "RemoveContainer" containerID="dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe" Jan 20 18:24:25 crc kubenswrapper[4995]: E0120 18:24:25.251590 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe\": container with ID starting with dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe not found: ID does not exist" containerID="dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe" Jan 20 18:24:25 crc kubenswrapper[4995]: I0120 18:24:25.251686 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe"} err="failed to get container status \"dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe\": rpc error: code = NotFound desc = could not find container \"dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe\": container with ID starting with dcbbe11731abc8da815216f06b9446ea4825bf2f9680b4be4ecce2c93c00adfe not found: ID does not exist" Jan 20 18:24:26 crc kubenswrapper[4995]: I0120 18:24:26.001263 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" path="/var/lib/kubelet/pods/6aaa69d1-2180-4312-95ce-b5ab5480d934/volumes" Jan 20 18:25:00 crc kubenswrapper[4995]: I0120 18:25:00.571453 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:25:00 crc kubenswrapper[4995]: I0120 18:25:00.572129 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.214023 4995 kubelet.go:2421] "SyncLoop ADD" 
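
[annotation] The NotFound errors above are benign: the kubelet asks the runtime for the status of containers it is deleting, and CRI-O has already removed them, so the gRPC call returns codes.NotFound and cleanup proceeds. A sketch of the idempotent-delete pattern; criRemove is a hypothetical stand-in for the CRI RemoveContainer call, not the real client:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer treats a container that is already gone from the runtime
// as successfully removed, so repeated deletions cannot fail.
func removeContainer(criRemove func(id string) error, id string) error {
	if err := criRemove(id); err != nil && status.Code(err) != codes.NotFound {
		return fmt.Errorf("failed to remove container %q: %w", id, err)
	}
	return nil // removed, or already gone: cleanup is idempotent
}

func main() {
	gone := func(string) error { return status.Error(codes.NotFound, "could not find container") }
	fmt.Println(removeContainer(gone, "8308de74bd00")) // <nil>
}
```
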
source="api" pods=["openshift-marketplace/community-operators-tcbpc"] Jan 20 18:25:02 crc kubenswrapper[4995]: E0120 18:25:02.215350 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="registry-server" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.215374 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="registry-server" Jan 20 18:25:02 crc kubenswrapper[4995]: E0120 18:25:02.215429 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="extract-content" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.215442 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="extract-content" Jan 20 18:25:02 crc kubenswrapper[4995]: E0120 18:25:02.215472 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="extract-utilities" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.215484 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="extract-utilities" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.215809 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="6aaa69d1-2180-4312-95ce-b5ab5480d934" containerName="registry-server" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.218321 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.226112 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tcbpc"] Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.327825 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-catalog-content\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.328164 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-utilities\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.328325 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dll2k\" (UniqueName: \"kubernetes.io/projected/de2401dd-2f08-4269-af36-83a1a98a664d-kube-api-access-dll2k\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.430596 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dll2k\" (UniqueName: \"kubernetes.io/projected/de2401dd-2f08-4269-af36-83a1a98a664d-kube-api-access-dll2k\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.430898 
4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-catalog-content\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.431016 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-utilities\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.431496 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-catalog-content\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.431697 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-utilities\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.451545 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dll2k\" (UniqueName: \"kubernetes.io/projected/de2401dd-2f08-4269-af36-83a1a98a664d-kube-api-access-dll2k\") pod \"community-operators-tcbpc\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:02 crc kubenswrapper[4995]: I0120 18:25:02.570138 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:03 crc kubenswrapper[4995]: I0120 18:25:03.134104 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tcbpc"] Jan 20 18:25:03 crc kubenswrapper[4995]: W0120 18:25:03.141400 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde2401dd_2f08_4269_af36_83a1a98a664d.slice/crio-67491799293cf32b5c9118ee5b9503701b0a6cfe8b1bdc20f322ac830d2be7a9 WatchSource:0}: Error finding container 67491799293cf32b5c9118ee5b9503701b0a6cfe8b1bdc20f322ac830d2be7a9: Status 404 returned error can't find the container with id 67491799293cf32b5c9118ee5b9503701b0a6cfe8b1bdc20f322ac830d2be7a9 Jan 20 18:25:03 crc kubenswrapper[4995]: I0120 18:25:03.579561 4995 generic.go:334] "Generic (PLEG): container finished" podID="de2401dd-2f08-4269-af36-83a1a98a664d" containerID="a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68" exitCode=0 Jan 20 18:25:03 crc kubenswrapper[4995]: I0120 18:25:03.579608 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tcbpc" event={"ID":"de2401dd-2f08-4269-af36-83a1a98a664d","Type":"ContainerDied","Data":"a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68"} Jan 20 18:25:03 crc kubenswrapper[4995]: I0120 18:25:03.579635 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tcbpc" event={"ID":"de2401dd-2f08-4269-af36-83a1a98a664d","Type":"ContainerStarted","Data":"67491799293cf32b5c9118ee5b9503701b0a6cfe8b1bdc20f322ac830d2be7a9"} Jan 20 18:25:05 crc kubenswrapper[4995]: I0120 18:25:05.598568 4995 generic.go:334] "Generic (PLEG): container finished" podID="de2401dd-2f08-4269-af36-83a1a98a664d" containerID="5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae" exitCode=0 Jan 20 18:25:05 crc kubenswrapper[4995]: I0120 18:25:05.598643 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tcbpc" event={"ID":"de2401dd-2f08-4269-af36-83a1a98a664d","Type":"ContainerDied","Data":"5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae"} Jan 20 18:25:06 crc kubenswrapper[4995]: I0120 18:25:06.616435 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tcbpc" event={"ID":"de2401dd-2f08-4269-af36-83a1a98a664d","Type":"ContainerStarted","Data":"1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b"} Jan 20 18:25:06 crc kubenswrapper[4995]: I0120 18:25:06.639745 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tcbpc" podStartSLOduration=1.919407831 podStartE2EDuration="4.639724109s" podCreationTimestamp="2026-01-20 18:25:02 +0000 UTC" firstStartedPulling="2026-01-20 18:25:03.581296697 +0000 UTC m=+6821.825901503" lastFinishedPulling="2026-01-20 18:25:06.301612965 +0000 UTC m=+6824.546217781" observedRunningTime="2026-01-20 18:25:06.63239126 +0000 UTC m=+6824.876996066" watchObservedRunningTime="2026-01-20 18:25:06.639724109 +0000 UTC m=+6824.884328935" Jan 20 18:25:12 crc kubenswrapper[4995]: I0120 18:25:12.570572 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:12 crc kubenswrapper[4995]: I0120 18:25:12.571671 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
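
[annotation] The startup probe for these registry-server containers is a gRPC health check bounded to one second; the failure output logged earlier for redhat-operators-wsp5w ("timeout: failed to connect service \":50051\" within 1s") is the message a grpc_health_probe-style check emits when the dial does not complete in time. A sketch of such a check, not the exact probe binary:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// probe dials the gRPC health service with a 1s budget, mirroring the
// timeout behavior visible in the probe failure above.
func probe(addr string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock())
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within 1s: %w", addr, err)
	}
	defer conn.Close()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		return err
	}
	if resp.Status != healthpb.HealthCheckResponse_SERVING {
		return fmt.Errorf("service unhealthy: %v", resp.Status)
	}
	return nil
}

func main() { fmt.Println(probe(":50051")) }
```
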
pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:12 crc kubenswrapper[4995]: I0120 18:25:12.654952 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:12 crc kubenswrapper[4995]: I0120 18:25:12.762769 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:12 crc kubenswrapper[4995]: I0120 18:25:12.921187 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tcbpc"] Jan 20 18:25:14 crc kubenswrapper[4995]: I0120 18:25:14.715883 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tcbpc" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="registry-server" containerID="cri-o://1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b" gracePeriod=2 Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.254097 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.332181 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dll2k\" (UniqueName: \"kubernetes.io/projected/de2401dd-2f08-4269-af36-83a1a98a664d-kube-api-access-dll2k\") pod \"de2401dd-2f08-4269-af36-83a1a98a664d\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.332411 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-catalog-content\") pod \"de2401dd-2f08-4269-af36-83a1a98a664d\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.335472 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-utilities\") pod \"de2401dd-2f08-4269-af36-83a1a98a664d\" (UID: \"de2401dd-2f08-4269-af36-83a1a98a664d\") " Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.336474 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-utilities" (OuterVolumeSpecName: "utilities") pod "de2401dd-2f08-4269-af36-83a1a98a664d" (UID: "de2401dd-2f08-4269-af36-83a1a98a664d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.339830 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de2401dd-2f08-4269-af36-83a1a98a664d-kube-api-access-dll2k" (OuterVolumeSpecName: "kube-api-access-dll2k") pod "de2401dd-2f08-4269-af36-83a1a98a664d" (UID: "de2401dd-2f08-4269-af36-83a1a98a664d"). InnerVolumeSpecName "kube-api-access-dll2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.405873 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de2401dd-2f08-4269-af36-83a1a98a664d" (UID: "de2401dd-2f08-4269-af36-83a1a98a664d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.438041 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.438069 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2401dd-2f08-4269-af36-83a1a98a664d-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.438100 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dll2k\" (UniqueName: \"kubernetes.io/projected/de2401dd-2f08-4269-af36-83a1a98a664d-kube-api-access-dll2k\") on node \"crc\" DevicePath \"\"" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.733223 4995 generic.go:334] "Generic (PLEG): container finished" podID="de2401dd-2f08-4269-af36-83a1a98a664d" containerID="1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b" exitCode=0 Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.733287 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tcbpc" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.733315 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tcbpc" event={"ID":"de2401dd-2f08-4269-af36-83a1a98a664d","Type":"ContainerDied","Data":"1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b"} Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.734543 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tcbpc" event={"ID":"de2401dd-2f08-4269-af36-83a1a98a664d","Type":"ContainerDied","Data":"67491799293cf32b5c9118ee5b9503701b0a6cfe8b1bdc20f322ac830d2be7a9"} Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.734597 4995 scope.go:117] "RemoveContainer" containerID="1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.765880 4995 scope.go:117] "RemoveContainer" containerID="5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.806261 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tcbpc"] Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.808011 4995 scope.go:117] "RemoveContainer" containerID="a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.819735 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tcbpc"] Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.856726 4995 scope.go:117] "RemoveContainer" containerID="1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b" Jan 20 18:25:15 crc kubenswrapper[4995]: E0120 18:25:15.857399 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b\": container with ID starting with 1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b not found: ID does not exist" containerID="1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.857580 
4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b"} err="failed to get container status \"1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b\": rpc error: code = NotFound desc = could not find container \"1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b\": container with ID starting with 1ba84b47464c25a1f44eef9f025b7436d2f87f78a46f4791f12086572cbc406b not found: ID does not exist" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.857703 4995 scope.go:117] "RemoveContainer" containerID="5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae" Jan 20 18:25:15 crc kubenswrapper[4995]: E0120 18:25:15.858304 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae\": container with ID starting with 5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae not found: ID does not exist" containerID="5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.858341 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae"} err="failed to get container status \"5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae\": rpc error: code = NotFound desc = could not find container \"5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae\": container with ID starting with 5c6ecdcf46db48c1a907facdbb310c0a47d6734b6ad67eb0b390ad718b205dae not found: ID does not exist" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.858373 4995 scope.go:117] "RemoveContainer" containerID="a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68" Jan 20 18:25:15 crc kubenswrapper[4995]: E0120 18:25:15.858825 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68\": container with ID starting with a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68 not found: ID does not exist" containerID="a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68" Jan 20 18:25:15 crc kubenswrapper[4995]: I0120 18:25:15.858928 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68"} err="failed to get container status \"a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68\": rpc error: code = NotFound desc = could not find container \"a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68\": container with ID starting with a5dbb2a1d1d45e73d8e025be9f0ea4a3ecbb352639b67f16bb64fbb5db40cf68 not found: ID does not exist" Jan 20 18:25:16 crc kubenswrapper[4995]: I0120 18:25:16.011320 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" path="/var/lib/kubelet/pods/de2401dd-2f08-4269-af36-83a1a98a664d/volumes" Jan 20 18:25:30 crc kubenswrapper[4995]: I0120 18:25:30.571744 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
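
[annotation] The "Cleaned up orphaned pod volumes dir" entry above shows the on-disk layout behind all the mount and unmount messages: each pod's volumes live under /var/lib/kubelet/pods/<podUID>/volumes/, in a per-plugin directory whose name escapes "/" to "~" (e.g. kubernetes.io~empty-dir). A sketch of the path construction only:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// podVolumePath builds the per-pod volume directory path implied by the
// cleanup entry above.
func podVolumePath(podUID, plugin, volume string) string {
	return filepath.Join("/var/lib/kubelet/pods", podUID,
		"volumes", strings.ReplaceAll(plugin, "/", "~"), volume)
}

func main() {
	fmt.Println(podVolumePath("de2401dd-2f08-4269-af36-83a1a98a664d",
		"kubernetes.io/empty-dir", "catalog-content"))
	// /var/lib/kubelet/pods/de2401dd-.../volumes/kubernetes.io~empty-dir/catalog-content
}
```
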
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:25:30 crc kubenswrapper[4995]: I0120 18:25:30.572511 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:26:00 crc kubenswrapper[4995]: I0120 18:26:00.571768 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:26:00 crc kubenswrapper[4995]: I0120 18:26:00.574991 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:26:00 crc kubenswrapper[4995]: I0120 18:26:00.575262 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:26:00 crc kubenswrapper[4995]: I0120 18:26:00.576591 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6290eb77e0700a720d61585c376f0060aca7c42b33d3f0c28a5f7dae16d0fd08"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:26:00 crc kubenswrapper[4995]: I0120 18:26:00.576919 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://6290eb77e0700a720d61585c376f0060aca7c42b33d3f0c28a5f7dae16d0fd08" gracePeriod=600 Jan 20 18:26:01 crc kubenswrapper[4995]: I0120 18:26:01.300392 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="6290eb77e0700a720d61585c376f0060aca7c42b33d3f0c28a5f7dae16d0fd08" exitCode=0 Jan 20 18:26:01 crc kubenswrapper[4995]: I0120 18:26:01.300516 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"6290eb77e0700a720d61585c376f0060aca7c42b33d3f0c28a5f7dae16d0fd08"} Jan 20 18:26:01 crc kubenswrapper[4995]: I0120 18:26:01.300934 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202"} Jan 20 18:26:01 crc kubenswrapper[4995]: I0120 18:26:01.300955 4995 scope.go:117] "RemoveContainer" containerID="a96d8a1a915592baf2927f59bf2cc9e9c81bc9e7baa58c7fd3f31714460e07fc" Jan 20 18:28:00 crc kubenswrapper[4995]: I0120 18:28:00.572130 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: 
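
[annotation] The machine-config-daemon liveness checks above are plain HTTP GETs against 127.0.0.1:8798/health; the :00/:30 timestamps show a 30s period, and once enough consecutive probes fail the kubelet logs "failed liveness probe, will be restarted" and kills the container. A sketch of that loop; the 3-failure threshold is kubelet's default and is assumed here, since the pod spec is not in the log:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// live performs one HTTP liveness check with a short timeout.
func live(url string) error {
	client := http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as logged above
	}
	resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	failures := 0
	for failures < 3 { // assumed default failureThreshold
		if err := live("http://127.0.0.1:8798/health"); err != nil {
			failures++
			fmt.Println("Probe failed:", err)
		} else {
			failures = 0
		}
		time.Sleep(30 * time.Second) // periodSeconds, per the :00/:30 cadence above
	}
	fmt.Println("Container machine-config-daemon failed liveness probe, will be restarted")
}
```
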
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:28:00 crc kubenswrapper[4995]: I0120 18:28:00.572643 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:28:30 crc kubenswrapper[4995]: I0120 18:28:30.571803 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:28:30 crc kubenswrapper[4995]: I0120 18:28:30.572710 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:29:00 crc kubenswrapper[4995]: I0120 18:29:00.571199 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:29:00 crc kubenswrapper[4995]: I0120 18:29:00.571789 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:29:00 crc kubenswrapper[4995]: I0120 18:29:00.571829 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:29:00 crc kubenswrapper[4995]: I0120 18:29:00.572591 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:29:00 crc kubenswrapper[4995]: I0120 18:29:00.572634 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" gracePeriod=600 Jan 20 18:29:00 crc kubenswrapper[4995]: E0120 18:29:00.702618 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:29:01 crc kubenswrapper[4995]: I0120 18:29:01.386117 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" exitCode=0 Jan 20 18:29:01 crc kubenswrapper[4995]: I0120 18:29:01.386157 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202"} Jan 20 18:29:01 crc kubenswrapper[4995]: I0120 18:29:01.386522 4995 scope.go:117] "RemoveContainer" containerID="6290eb77e0700a720d61585c376f0060aca7c42b33d3f0c28a5f7dae16d0fd08" Jan 20 18:29:01 crc kubenswrapper[4995]: I0120 18:29:01.387484 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:29:01 crc kubenswrapper[4995]: E0120 18:29:01.388139 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:29:14 crc kubenswrapper[4995]: I0120 18:29:14.990281 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:29:14 crc kubenswrapper[4995]: E0120 18:29:14.991620 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:29:27 crc kubenswrapper[4995]: I0120 18:29:27.989827 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:29:27 crc kubenswrapper[4995]: E0120 18:29:27.993958 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:29:36 crc kubenswrapper[4995]: I0120 18:29:36.477755 4995 scope.go:117] "RemoveContainer" containerID="b6c6f4de6995d6b6e24c1de6afbb4aa9787b765003f847476f58a06453be02b1" Jan 20 18:29:36 crc kubenswrapper[4995]: I0120 18:29:36.513021 4995 scope.go:117] "RemoveContainer" containerID="1e32392e9dd0bb5c0d55211a580455b0c966f9d91954b51ac098e344d2741815" Jan 20 18:29:36 crc kubenswrapper[4995]: I0120 18:29:36.562940 4995 scope.go:117] "RemoveContainer" containerID="4dc72d36799d6eb1e8dd66b06e909edfeb38b7c5789fdd4c44b052cb82f6006a" Jan 20 18:29:39 crc kubenswrapper[4995]: I0120 18:29:39.989896 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 
18:29:39 crc kubenswrapper[4995]: E0120 18:29:39.990491 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:29:54 crc kubenswrapper[4995]: I0120 18:29:54.990319 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:29:54 crc kubenswrapper[4995]: E0120 18:29:54.991825 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.145005 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8"] Jan 20 18:30:00 crc kubenswrapper[4995]: E0120 18:30:00.146104 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="extract-utilities" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.146130 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="extract-utilities" Jan 20 18:30:00 crc kubenswrapper[4995]: E0120 18:30:00.146163 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="registry-server" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.146172 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="registry-server" Jan 20 18:30:00 crc kubenswrapper[4995]: E0120 18:30:00.146190 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="extract-content" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.146199 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="extract-content" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.146442 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="de2401dd-2f08-4269-af36-83a1a98a664d" containerName="registry-server" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.147281 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.149664 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.150831 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.157499 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8"] Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.233139 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqcsq\" (UniqueName: \"kubernetes.io/projected/16764a57-e02b-45c0-8fa1-cd0b409f447d-kube-api-access-dqcsq\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.233318 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16764a57-e02b-45c0-8fa1-cd0b409f447d-secret-volume\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.233486 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16764a57-e02b-45c0-8fa1-cd0b409f447d-config-volume\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.335843 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16764a57-e02b-45c0-8fa1-cd0b409f447d-config-volume\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.335945 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqcsq\" (UniqueName: \"kubernetes.io/projected/16764a57-e02b-45c0-8fa1-cd0b409f447d-kube-api-access-dqcsq\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.335985 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16764a57-e02b-45c0-8fa1-cd0b409f447d-secret-volume\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.337818 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16764a57-e02b-45c0-8fa1-cd0b409f447d-config-volume\") pod 
\"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.345660 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16764a57-e02b-45c0-8fa1-cd0b409f447d-secret-volume\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.355046 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqcsq\" (UniqueName: \"kubernetes.io/projected/16764a57-e02b-45c0-8fa1-cd0b409f447d-kube-api-access-dqcsq\") pod \"collect-profiles-29482230-h5fw8\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.466346 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:00 crc kubenswrapper[4995]: I0120 18:30:00.944190 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8"] Jan 20 18:30:01 crc kubenswrapper[4995]: I0120 18:30:01.041525 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" event={"ID":"16764a57-e02b-45c0-8fa1-cd0b409f447d","Type":"ContainerStarted","Data":"f7c58ec59732cc469d8f6be4132504611c02e1a96386da60d92d04ddad25bafc"} Jan 20 18:30:02 crc kubenswrapper[4995]: I0120 18:30:02.052698 4995 generic.go:334] "Generic (PLEG): container finished" podID="16764a57-e02b-45c0-8fa1-cd0b409f447d" containerID="80810eb478f48ccd0a81dc32bff651616666a395b80601ed383a969db4fa3629" exitCode=0 Jan 20 18:30:02 crc kubenswrapper[4995]: I0120 18:30:02.052792 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" event={"ID":"16764a57-e02b-45c0-8fa1-cd0b409f447d","Type":"ContainerDied","Data":"80810eb478f48ccd0a81dc32bff651616666a395b80601ed383a969db4fa3629"} Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.443348 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.502758 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqcsq\" (UniqueName: \"kubernetes.io/projected/16764a57-e02b-45c0-8fa1-cd0b409f447d-kube-api-access-dqcsq\") pod \"16764a57-e02b-45c0-8fa1-cd0b409f447d\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.502956 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16764a57-e02b-45c0-8fa1-cd0b409f447d-secret-volume\") pod \"16764a57-e02b-45c0-8fa1-cd0b409f447d\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.503201 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16764a57-e02b-45c0-8fa1-cd0b409f447d-config-volume\") pod \"16764a57-e02b-45c0-8fa1-cd0b409f447d\" (UID: \"16764a57-e02b-45c0-8fa1-cd0b409f447d\") " Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.504502 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16764a57-e02b-45c0-8fa1-cd0b409f447d-config-volume" (OuterVolumeSpecName: "config-volume") pod "16764a57-e02b-45c0-8fa1-cd0b409f447d" (UID: "16764a57-e02b-45c0-8fa1-cd0b409f447d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.510828 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16764a57-e02b-45c0-8fa1-cd0b409f447d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "16764a57-e02b-45c0-8fa1-cd0b409f447d" (UID: "16764a57-e02b-45c0-8fa1-cd0b409f447d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.511243 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16764a57-e02b-45c0-8fa1-cd0b409f447d-kube-api-access-dqcsq" (OuterVolumeSpecName: "kube-api-access-dqcsq") pod "16764a57-e02b-45c0-8fa1-cd0b409f447d" (UID: "16764a57-e02b-45c0-8fa1-cd0b409f447d"). InnerVolumeSpecName "kube-api-access-dqcsq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.605839 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/16764a57-e02b-45c0-8fa1-cd0b409f447d-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.605884 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/16764a57-e02b-45c0-8fa1-cd0b409f447d-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:30:03 crc kubenswrapper[4995]: I0120 18:30:03.605899 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqcsq\" (UniqueName: \"kubernetes.io/projected/16764a57-e02b-45c0-8fa1-cd0b409f447d-kube-api-access-dqcsq\") on node \"crc\" DevicePath \"\"" Jan 20 18:30:04 crc kubenswrapper[4995]: I0120 18:30:04.071043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" event={"ID":"16764a57-e02b-45c0-8fa1-cd0b409f447d","Type":"ContainerDied","Data":"f7c58ec59732cc469d8f6be4132504611c02e1a96386da60d92d04ddad25bafc"} Jan 20 18:30:04 crc kubenswrapper[4995]: I0120 18:30:04.071099 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8" Jan 20 18:30:04 crc kubenswrapper[4995]: I0120 18:30:04.071110 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7c58ec59732cc469d8f6be4132504611c02e1a96386da60d92d04ddad25bafc" Jan 20 18:30:04 crc kubenswrapper[4995]: I0120 18:30:04.558133 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs"] Jan 20 18:30:04 crc kubenswrapper[4995]: I0120 18:30:04.575645 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482185-lwcjs"] Jan 20 18:30:06 crc kubenswrapper[4995]: I0120 18:30:06.012633 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4039f52-226d-493f-9d76-5c92ccaba556" path="/var/lib/kubelet/pods/d4039f52-226d-493f-9d76-5c92ccaba556/volumes" Jan 20 18:30:07 crc kubenswrapper[4995]: I0120 18:30:07.989618 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:30:07 crc kubenswrapper[4995]: E0120 18:30:07.990312 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:30:18 crc kubenswrapper[4995]: I0120 18:30:18.990874 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:30:18 crc kubenswrapper[4995]: E0120 18:30:18.992292 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:30:30 crc kubenswrapper[4995]: I0120 18:30:30.989971 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:30:30 crc kubenswrapper[4995]: E0120 18:30:30.991069 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:30:36 crc kubenswrapper[4995]: I0120 18:30:36.620829 4995 scope.go:117] "RemoveContainer" containerID="42aa0d3da87eb12682e5fe21640d8f2df983b6ae5a0ecb5c7d3bdefc3bd35a32" Jan 20 18:30:41 crc kubenswrapper[4995]: I0120 18:30:41.996310 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:30:41 crc kubenswrapper[4995]: E0120 18:30:41.997181 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:30:52 crc kubenswrapper[4995]: I0120 18:30:52.990160 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:30:52 crc kubenswrapper[4995]: E0120 18:30:52.994576 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:31:03 crc kubenswrapper[4995]: I0120 18:31:03.990956 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:31:03 crc kubenswrapper[4995]: E0120 18:31:03.991978 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:31:14 crc kubenswrapper[4995]: I0120 18:31:14.989277 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:31:14 crc kubenswrapper[4995]: E0120 18:31:14.989984 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:31:28 crc kubenswrapper[4995]: I0120 18:31:28.990852 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:31:28 crc kubenswrapper[4995]: E0120 18:31:28.992168 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:31:42 crc kubenswrapper[4995]: I0120 18:31:42.989845 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:31:42 crc kubenswrapper[4995]: E0120 18:31:42.990865 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.660896 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-msxpf"] Jan 20 18:31:49 crc kubenswrapper[4995]: E0120 18:31:49.661696 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16764a57-e02b-45c0-8fa1-cd0b409f447d" containerName="collect-profiles" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.661708 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="16764a57-e02b-45c0-8fa1-cd0b409f447d" containerName="collect-profiles" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.661934 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="16764a57-e02b-45c0-8fa1-cd0b409f447d" containerName="collect-profiles" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.663364 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.671056 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-msxpf"] Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.740371 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-utilities\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.740435 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-catalog-content\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.740690 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zmrl\" (UniqueName: \"kubernetes.io/projected/158c03d6-f3de-44c5-941b-38b94cef407c-kube-api-access-6zmrl\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.841943 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zmrl\" (UniqueName: \"kubernetes.io/projected/158c03d6-f3de-44c5-941b-38b94cef407c-kube-api-access-6zmrl\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.842107 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-utilities\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.842144 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-catalog-content\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.842597 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-utilities\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.842689 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-catalog-content\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.868811 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6zmrl\" (UniqueName: \"kubernetes.io/projected/158c03d6-f3de-44c5-941b-38b94cef407c-kube-api-access-6zmrl\") pod \"certified-operators-msxpf\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") " pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:49 crc kubenswrapper[4995]: I0120 18:31:49.989342 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-msxpf" Jan 20 18:31:50 crc kubenswrapper[4995]: I0120 18:31:50.306429 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-msxpf"] Jan 20 18:31:51 crc kubenswrapper[4995]: I0120 18:31:51.268804 4995 generic.go:334] "Generic (PLEG): container finished" podID="158c03d6-f3de-44c5-941b-38b94cef407c" containerID="6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7" exitCode=0 Jan 20 18:31:51 crc kubenswrapper[4995]: I0120 18:31:51.268920 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msxpf" event={"ID":"158c03d6-f3de-44c5-941b-38b94cef407c","Type":"ContainerDied","Data":"6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7"} Jan 20 18:31:51 crc kubenswrapper[4995]: I0120 18:31:51.269237 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msxpf" event={"ID":"158c03d6-f3de-44c5-941b-38b94cef407c","Type":"ContainerStarted","Data":"eb9200278dc2785afa7d698c70e388e89165d335738b3999a1fc3713af99bd2e"} Jan 20 18:31:51 crc kubenswrapper[4995]: I0120 18:31:51.272690 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 18:31:53 crc kubenswrapper[4995]: I0120 18:31:53.299657 4995 generic.go:334] "Generic (PLEG): container finished" podID="158c03d6-f3de-44c5-941b-38b94cef407c" containerID="94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6" exitCode=0 Jan 20 18:31:53 crc kubenswrapper[4995]: I0120 18:31:53.299761 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msxpf" event={"ID":"158c03d6-f3de-44c5-941b-38b94cef407c","Type":"ContainerDied","Data":"94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6"} Jan 20 18:31:53 crc kubenswrapper[4995]: I0120 18:31:53.990891 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:31:53 crc kubenswrapper[4995]: E0120 18:31:53.991791 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:31:54 crc kubenswrapper[4995]: I0120 18:31:54.319589 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msxpf" event={"ID":"158c03d6-f3de-44c5-941b-38b94cef407c","Type":"ContainerStarted","Data":"e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67"} Jan 20 18:31:54 crc kubenswrapper[4995]: I0120 18:31:54.355673 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-msxpf" podStartSLOduration=2.5842845199999998 
Jan 20 18:32:00 crc kubenswrapper[4995]: I0120 18:32:00.004395 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-msxpf"
Jan 20 18:32:00 crc kubenswrapper[4995]: I0120 18:32:00.005466 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-msxpf"
Jan 20 18:32:00 crc kubenswrapper[4995]: I0120 18:32:00.057813 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-msxpf"
Jan 20 18:32:00 crc kubenswrapper[4995]: I0120 18:32:00.474797 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-msxpf"
Jan 20 18:32:00 crc kubenswrapper[4995]: I0120 18:32:00.556518 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-msxpf"]
Jan 20 18:32:02 crc kubenswrapper[4995]: I0120 18:32:02.426063 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-msxpf" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="registry-server" containerID="cri-o://e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67" gracePeriod=2
Jan 20 18:32:02 crc kubenswrapper[4995]: I0120 18:32:02.987335 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-msxpf"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.062726 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-utilities\") pod \"158c03d6-f3de-44c5-941b-38b94cef407c\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") "
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.062824 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zmrl\" (UniqueName: \"kubernetes.io/projected/158c03d6-f3de-44c5-941b-38b94cef407c-kube-api-access-6zmrl\") pod \"158c03d6-f3de-44c5-941b-38b94cef407c\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") "
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.062895 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-catalog-content\") pod \"158c03d6-f3de-44c5-941b-38b94cef407c\" (UID: \"158c03d6-f3de-44c5-941b-38b94cef407c\") "
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.064713 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-utilities" (OuterVolumeSpecName: "utilities") pod "158c03d6-f3de-44c5-941b-38b94cef407c" (UID: "158c03d6-f3de-44c5-941b-38b94cef407c"). InnerVolumeSpecName "utilities".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.069488 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/158c03d6-f3de-44c5-941b-38b94cef407c-kube-api-access-6zmrl" (OuterVolumeSpecName: "kube-api-access-6zmrl") pod "158c03d6-f3de-44c5-941b-38b94cef407c" (UID: "158c03d6-f3de-44c5-941b-38b94cef407c"). InnerVolumeSpecName "kube-api-access-6zmrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.115565 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "158c03d6-f3de-44c5-941b-38b94cef407c" (UID: "158c03d6-f3de-44c5-941b-38b94cef407c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.164552 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.164593 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zmrl\" (UniqueName: \"kubernetes.io/projected/158c03d6-f3de-44c5-941b-38b94cef407c-kube-api-access-6zmrl\") on node \"crc\" DevicePath \"\"" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.164608 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/158c03d6-f3de-44c5-941b-38b94cef407c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.439781 4995 generic.go:334] "Generic (PLEG): container finished" podID="158c03d6-f3de-44c5-941b-38b94cef407c" containerID="e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67" exitCode=0 Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.439831 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msxpf" event={"ID":"158c03d6-f3de-44c5-941b-38b94cef407c","Type":"ContainerDied","Data":"e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67"} Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.439865 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-msxpf" event={"ID":"158c03d6-f3de-44c5-941b-38b94cef407c","Type":"ContainerDied","Data":"eb9200278dc2785afa7d698c70e388e89165d335738b3999a1fc3713af99bd2e"} Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.439890 4995 scope.go:117] "RemoveContainer" containerID="e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.440172 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-msxpf"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.486786 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-msxpf"]
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.492398 4995 scope.go:117] "RemoveContainer" containerID="94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.499652 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-msxpf"]
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.539853 4995 scope.go:117] "RemoveContainer" containerID="6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.589814 4995 scope.go:117] "RemoveContainer" containerID="e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67"
Jan 20 18:32:03 crc kubenswrapper[4995]: E0120 18:32:03.590383 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67\": container with ID starting with e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67 not found: ID does not exist" containerID="e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.590468 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67"} err="failed to get container status \"e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67\": rpc error: code = NotFound desc = could not find container \"e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67\": container with ID starting with e0f7b09befd4e07bf4554dfc93c860fe7486d2eb928601e591bdd4e9c819db67 not found: ID does not exist"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.590521 4995 scope.go:117] "RemoveContainer" containerID="94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6"
Jan 20 18:32:03 crc kubenswrapper[4995]: E0120 18:32:03.590963 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6\": container with ID starting with 94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6 not found: ID does not exist" containerID="94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.591004 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6"} err="failed to get container status \"94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6\": rpc error: code = NotFound desc = could not find container \"94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6\": container with ID starting with 94e9ce7f66f6ec19ab3f10d3496fdd41765824ffbfdb139fa9699e246b57b3f6 not found: ID does not exist"
Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.591032 4995 scope.go:117] "RemoveContainer" containerID="6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7"
Jan 20 18:32:03 crc kubenswrapper[4995]: E0120 18:32:03.591651 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7\": container with ID starting with 6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7 not found: ID does not exist" containerID="6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7"
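The NotFound errors above are benign: by the time "RemoveContainer" runs, CRI-O has already deleted the containers, and kubelet treats a NotFound status from the runtime as "nothing left to remove". A Go sketch of that idempotent-cleanup pattern against a gRPC runtime follows; the runtime interface and removeContainer helper are illustrative stand-ins, not kubelet's real types.

package cleanup

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runtime is a stand-in for the CRI runtime client seen in the log.
type runtime interface {
	ContainerStatus(id string) error
	RemoveContainer(id string) error
}

// removeContainer deletes a container but tolerates "already gone":
// a NotFound from the runtime (code = NotFound in the entries above)
// means the cleanup goal is already met, so it is not treated as failure.
func removeContainer(rt runtime, id string) error {
	if err := rt.ContainerStatus(id); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil // container no longer exists; count as removed
		}
		return fmt.Errorf("failed to get container status %q: %w", id, err)
	}
	return rt.RemoveContainer(id)
}

This is why the E lines are immediately followed by I lines and the sync loop moves on: the "DeleteContainer returned error" messages record the race, not a real problem.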
failed" err="rpc error: code = NotFound desc = could not find container \"6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7\": container with ID starting with 6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7 not found: ID does not exist" containerID="6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7" Jan 20 18:32:03 crc kubenswrapper[4995]: I0120 18:32:03.591736 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7"} err="failed to get container status \"6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7\": rpc error: code = NotFound desc = could not find container \"6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7\": container with ID starting with 6d913b60e3d5b97fb0622ff8d7394017f7cce7d070fdaa74c5974f3839833eb7 not found: ID does not exist" Jan 20 18:32:04 crc kubenswrapper[4995]: I0120 18:32:04.008474 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" path="/var/lib/kubelet/pods/158c03d6-f3de-44c5-941b-38b94cef407c/volumes" Jan 20 18:32:04 crc kubenswrapper[4995]: I0120 18:32:04.990440 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:32:04 crc kubenswrapper[4995]: E0120 18:32:04.990853 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:32:18 crc kubenswrapper[4995]: I0120 18:32:18.990575 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:32:18 crc kubenswrapper[4995]: E0120 18:32:18.991924 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:32:32 crc kubenswrapper[4995]: I0120 18:32:32.001923 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:32:32 crc kubenswrapper[4995]: E0120 18:32:32.003380 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:32:44 crc kubenswrapper[4995]: I0120 18:32:44.990260 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:32:44 crc kubenswrapper[4995]: E0120 18:32:44.991281 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:32:58 crc kubenswrapper[4995]: I0120 18:32:58.990470 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:32:58 crc kubenswrapper[4995]: E0120 18:32:58.992352 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:33:09 crc kubenswrapper[4995]: I0120 18:33:09.990805 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:33:09 crc kubenswrapper[4995]: E0120 18:33:09.992249 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:33:22 crc kubenswrapper[4995]: I0120 18:33:22.991293 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:33:22 crc kubenswrapper[4995]: E0120 18:33:22.992274 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:33:37 crc kubenswrapper[4995]: I0120 18:33:37.989913 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:33:37 crc kubenswrapper[4995]: E0120 18:33:37.991215 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:33:50 crc kubenswrapper[4995]: I0120 18:33:50.990311 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:33:50 crc kubenswrapper[4995]: E0120 18:33:50.991350 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:34:05 crc kubenswrapper[4995]: I0120 18:34:05.989899 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202" Jan 20 18:34:06 crc kubenswrapper[4995]: I0120 18:34:06.845823 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"25f43132eb3c862360a1c12d6849146bf7f3b1ac520a834e68280ed8d27c6a18"} Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.950020 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zlhjg"] Jan 20 18:34:15 crc kubenswrapper[4995]: E0120 18:34:15.951948 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="extract-utilities" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.951980 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="extract-utilities" Jan 20 18:34:15 crc kubenswrapper[4995]: E0120 18:34:15.952029 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="registry-server" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.952042 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="registry-server" Jan 20 18:34:15 crc kubenswrapper[4995]: E0120 18:34:15.952065 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="extract-content" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.952115 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="extract-content" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.952585 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="158c03d6-f3de-44c5-941b-38b94cef407c" containerName="registry-server" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.956028 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.968739 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zlhjg"] Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.984408 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-catalog-content\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.984497 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-utilities\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:15 crc kubenswrapper[4995]: I0120 18:34:15.985300 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ntxw\" (UniqueName: \"kubernetes.io/projected/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-kube-api-access-9ntxw\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.089162 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ntxw\" (UniqueName: \"kubernetes.io/projected/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-kube-api-access-9ntxw\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.090168 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-catalog-content\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.090228 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-utilities\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.090777 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-utilities\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.091149 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-catalog-content\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.110516 4995 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9ntxw\" (UniqueName: \"kubernetes.io/projected/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-kube-api-access-9ntxw\") pod \"redhat-marketplace-zlhjg\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " pod="openshift-marketplace/redhat-marketplace-zlhjg"
Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.281789 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zlhjg"
Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.832720 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zlhjg"]
Jan 20 18:34:16 crc kubenswrapper[4995]: I0120 18:34:16.950191 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerStarted","Data":"b1f3e3b05c7838c167028ca358557c04b0f34a3529aa55d4b773691ef5580bbe"}
Jan 20 18:34:17 crc kubenswrapper[4995]: I0120 18:34:17.962851 4995 generic.go:334] "Generic (PLEG): container finished" podID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerID="58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9" exitCode=0
Jan 20 18:34:17 crc kubenswrapper[4995]: I0120 18:34:17.962909 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerDied","Data":"58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9"}
Jan 20 18:34:18 crc kubenswrapper[4995]: I0120 18:34:18.975937 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerStarted","Data":"7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e"}
Jan 20 18:34:19 crc kubenswrapper[4995]: I0120 18:34:19.989456 4995 generic.go:334] "Generic (PLEG): container finished" podID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerID="7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e" exitCode=0
Jan 20 18:34:20 crc kubenswrapper[4995]: I0120 18:34:20.017709 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerDied","Data":"7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e"}
Jan 20 18:34:21 crc kubenswrapper[4995]: I0120 18:34:21.003537 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerStarted","Data":"b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a"}
Jan 20 18:34:21 crc kubenswrapper[4995]: I0120 18:34:21.049798 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zlhjg" podStartSLOduration=3.606677341 podStartE2EDuration="6.049775256s" podCreationTimestamp="2026-01-20 18:34:15 +0000 UTC" firstStartedPulling="2026-01-20 18:34:17.965617553 +0000 UTC m=+7376.210222399" lastFinishedPulling="2026-01-20 18:34:20.408715498 +0000 UTC m=+7378.653320314" observedRunningTime="2026-01-20 18:34:21.037937628 +0000 UTC m=+7379.282542454" watchObservedRunningTime="2026-01-20 18:34:21.049775256 +0000 UTC m=+7379.294380072"
Jan 20 18:34:26 crc kubenswrapper[4995]: I0120 18:34:26.282770 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-zlhjg"
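The "Generic (PLEG)" and "SyncLoop (PLEG): event for pod" entries above come from the Pod Lifecycle Event Generator, which relists runtime state and feeds typed events into the sync loop; here the catalog pod's two init-style containers (extract-utilities, extract-content) each produce a ContainerStarted/ContainerDied pair before registry-server comes up. The event shape serialized in these messages looks roughly like the following Go type, with field names read off the logged JSON and simplified from kubelet's internal definition:

package pleg

// PodLifecycleEventType matches the "Type" field in the logged events.
type PodLifecycleEventType string

const (
	ContainerStarted PodLifecycleEventType = "ContainerStarted"
	ContainerDied    PodLifecycleEventType = "ContainerDied"
)

// PodLifecycleEvent mirrors event={"ID":...,"Type":...,"Data":...}:
// the pod UID, what changed, and the container (or, for the first
// ContainerStarted above, the sandbox) ID the event refers to.
type PodLifecycleEvent struct {
	ID   string
	Type PodLifecycleEventType
	Data string
}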
pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:26 crc kubenswrapper[4995]: I0120 18:34:26.283397 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:26 crc kubenswrapper[4995]: I0120 18:34:26.348673 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:27 crc kubenswrapper[4995]: I0120 18:34:27.140312 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:27 crc kubenswrapper[4995]: I0120 18:34:27.199092 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zlhjg"] Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.083185 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zlhjg" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="registry-server" containerID="cri-o://b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a" gracePeriod=2 Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.616328 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.781594 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ntxw\" (UniqueName: \"kubernetes.io/projected/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-kube-api-access-9ntxw\") pod \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.781781 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-utilities\") pod \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.781841 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-catalog-content\") pod \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\" (UID: \"04bf4720-7c09-4162-8ea2-ef3d0d9629d5\") " Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.782856 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-utilities" (OuterVolumeSpecName: "utilities") pod "04bf4720-7c09-4162-8ea2-ef3d0d9629d5" (UID: "04bf4720-7c09-4162-8ea2-ef3d0d9629d5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.788194 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-kube-api-access-9ntxw" (OuterVolumeSpecName: "kube-api-access-9ntxw") pod "04bf4720-7c09-4162-8ea2-ef3d0d9629d5" (UID: "04bf4720-7c09-4162-8ea2-ef3d0d9629d5"). InnerVolumeSpecName "kube-api-access-9ntxw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.829579 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04bf4720-7c09-4162-8ea2-ef3d0d9629d5" (UID: "04bf4720-7c09-4162-8ea2-ef3d0d9629d5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.884235 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ntxw\" (UniqueName: \"kubernetes.io/projected/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-kube-api-access-9ntxw\") on node \"crc\" DevicePath \"\"" Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.884274 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:34:29 crc kubenswrapper[4995]: I0120 18:34:29.884287 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04bf4720-7c09-4162-8ea2-ef3d0d9629d5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.097468 4995 generic.go:334] "Generic (PLEG): container finished" podID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerID="b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a" exitCode=0 Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.097519 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerDied","Data":"b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a"} Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.097535 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zlhjg" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.097569 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zlhjg" event={"ID":"04bf4720-7c09-4162-8ea2-ef3d0d9629d5","Type":"ContainerDied","Data":"b1f3e3b05c7838c167028ca358557c04b0f34a3529aa55d4b773691ef5580bbe"} Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.097597 4995 scope.go:117] "RemoveContainer" containerID="b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.144958 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zlhjg"] Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.148296 4995 scope.go:117] "RemoveContainer" containerID="7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.155664 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zlhjg"] Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.171186 4995 scope.go:117] "RemoveContainer" containerID="58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.235190 4995 scope.go:117] "RemoveContainer" containerID="b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a" Jan 20 18:34:30 crc kubenswrapper[4995]: E0120 18:34:30.235633 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a\": container with ID starting with b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a not found: ID does not exist" containerID="b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.235688 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a"} err="failed to get container status \"b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a\": rpc error: code = NotFound desc = could not find container \"b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a\": container with ID starting with b48434a328a701f1c28e81a507bdc321cedc0375e3d35feaaa8f1b180616549a not found: ID does not exist" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.235718 4995 scope.go:117] "RemoveContainer" containerID="7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e" Jan 20 18:34:30 crc kubenswrapper[4995]: E0120 18:34:30.236256 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e\": container with ID starting with 7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e not found: ID does not exist" containerID="7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.236336 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e"} err="failed to get container status \"7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e\": rpc error: code = NotFound desc = could not find 
container \"7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e\": container with ID starting with 7edb73ab6b40e6be771e74a78c6ddc974f012f7aecbf91825b945f2d23dcda8e not found: ID does not exist" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.236370 4995 scope.go:117] "RemoveContainer" containerID="58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9" Jan 20 18:34:30 crc kubenswrapper[4995]: E0120 18:34:30.236897 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9\": container with ID starting with 58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9 not found: ID does not exist" containerID="58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9" Jan 20 18:34:30 crc kubenswrapper[4995]: I0120 18:34:30.236921 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9"} err="failed to get container status \"58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9\": rpc error: code = NotFound desc = could not find container \"58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9\": container with ID starting with 58b17913a8da17e078bccfca79437699ef0096502d25359033e0629d602616d9 not found: ID does not exist" Jan 20 18:34:32 crc kubenswrapper[4995]: I0120 18:34:32.003404 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" path="/var/lib/kubelet/pods/04bf4720-7c09-4162-8ea2-ef3d0d9629d5/volumes" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.121847 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hv6hg"] Jan 20 18:35:22 crc kubenswrapper[4995]: E0120 18:35:22.122896 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="registry-server" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.122914 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="registry-server" Jan 20 18:35:22 crc kubenswrapper[4995]: E0120 18:35:22.122958 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="extract-content" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.122967 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="extract-content" Jan 20 18:35:22 crc kubenswrapper[4995]: E0120 18:35:22.122991 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="extract-utilities" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.122999 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="extract-utilities" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.123307 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="04bf4720-7c09-4162-8ea2-ef3d0d9629d5" containerName="registry-server" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.125018 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.134754 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hv6hg"] Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.233657 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-catalog-content\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.233825 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-utilities\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.233901 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blgg8\" (UniqueName: \"kubernetes.io/projected/4f95c60b-6f36-4750-b4c8-c386b242abc2-kube-api-access-blgg8\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.335123 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blgg8\" (UniqueName: \"kubernetes.io/projected/4f95c60b-6f36-4750-b4c8-c386b242abc2-kube-api-access-blgg8\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.335611 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-catalog-content\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.335799 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-utilities\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.336329 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-utilities\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.336606 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-catalog-content\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.357883 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-blgg8\" (UniqueName: \"kubernetes.io/projected/4f95c60b-6f36-4750-b4c8-c386b242abc2-kube-api-access-blgg8\") pod \"community-operators-hv6hg\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.455887 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:22 crc kubenswrapper[4995]: I0120 18:35:22.942925 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hv6hg"] Jan 20 18:35:23 crc kubenswrapper[4995]: I0120 18:35:23.672486 4995 generic.go:334] "Generic (PLEG): container finished" podID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerID="52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516" exitCode=0 Jan 20 18:35:23 crc kubenswrapper[4995]: I0120 18:35:23.672570 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hv6hg" event={"ID":"4f95c60b-6f36-4750-b4c8-c386b242abc2","Type":"ContainerDied","Data":"52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516"} Jan 20 18:35:23 crc kubenswrapper[4995]: I0120 18:35:23.672789 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hv6hg" event={"ID":"4f95c60b-6f36-4750-b4c8-c386b242abc2","Type":"ContainerStarted","Data":"989a3b81e8ab7bd350865ff9f982dd707eaf7874691cffd693fce9e03f705322"} Jan 20 18:35:25 crc kubenswrapper[4995]: I0120 18:35:25.701931 4995 generic.go:334] "Generic (PLEG): container finished" podID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerID="aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4" exitCode=0 Jan 20 18:35:25 crc kubenswrapper[4995]: I0120 18:35:25.702058 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hv6hg" event={"ID":"4f95c60b-6f36-4750-b4c8-c386b242abc2","Type":"ContainerDied","Data":"aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4"} Jan 20 18:35:26 crc kubenswrapper[4995]: I0120 18:35:26.720347 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hv6hg" event={"ID":"4f95c60b-6f36-4750-b4c8-c386b242abc2","Type":"ContainerStarted","Data":"33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e"} Jan 20 18:35:26 crc kubenswrapper[4995]: I0120 18:35:26.752314 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hv6hg" podStartSLOduration=2.285174266 podStartE2EDuration="4.752289956s" podCreationTimestamp="2026-01-20 18:35:22 +0000 UTC" firstStartedPulling="2026-01-20 18:35:23.677361062 +0000 UTC m=+7441.921965898" lastFinishedPulling="2026-01-20 18:35:26.144476772 +0000 UTC m=+7444.389081588" observedRunningTime="2026-01-20 18:35:26.74199051 +0000 UTC m=+7444.986595326" watchObservedRunningTime="2026-01-20 18:35:26.752289956 +0000 UTC m=+7444.996894782" Jan 20 18:35:32 crc kubenswrapper[4995]: I0120 18:35:32.456047 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:32 crc kubenswrapper[4995]: I0120 18:35:32.456989 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:32 crc kubenswrapper[4995]: I0120 18:35:32.513334 4995 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:32 crc kubenswrapper[4995]: I0120 18:35:32.834730 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:32 crc kubenswrapper[4995]: I0120 18:35:32.886233 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hv6hg"] Jan 20 18:35:34 crc kubenswrapper[4995]: I0120 18:35:34.828543 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hv6hg" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="registry-server" containerID="cri-o://33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e" gracePeriod=2 Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.497204 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.636005 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-utilities\") pod \"4f95c60b-6f36-4750-b4c8-c386b242abc2\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.636108 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blgg8\" (UniqueName: \"kubernetes.io/projected/4f95c60b-6f36-4750-b4c8-c386b242abc2-kube-api-access-blgg8\") pod \"4f95c60b-6f36-4750-b4c8-c386b242abc2\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.636170 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-catalog-content\") pod \"4f95c60b-6f36-4750-b4c8-c386b242abc2\" (UID: \"4f95c60b-6f36-4750-b4c8-c386b242abc2\") " Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.637644 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-utilities" (OuterVolumeSpecName: "utilities") pod "4f95c60b-6f36-4750-b4c8-c386b242abc2" (UID: "4f95c60b-6f36-4750-b4c8-c386b242abc2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.644742 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f95c60b-6f36-4750-b4c8-c386b242abc2-kube-api-access-blgg8" (OuterVolumeSpecName: "kube-api-access-blgg8") pod "4f95c60b-6f36-4750-b4c8-c386b242abc2" (UID: "4f95c60b-6f36-4750-b4c8-c386b242abc2"). InnerVolumeSpecName "kube-api-access-blgg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.649442 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.649503 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blgg8\" (UniqueName: \"kubernetes.io/projected/4f95c60b-6f36-4750-b4c8-c386b242abc2-kube-api-access-blgg8\") on node \"crc\" DevicePath \"\"" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.684639 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f95c60b-6f36-4750-b4c8-c386b242abc2" (UID: "4f95c60b-6f36-4750-b4c8-c386b242abc2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.750773 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f95c60b-6f36-4750-b4c8-c386b242abc2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.837962 4995 generic.go:334] "Generic (PLEG): container finished" podID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerID="33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e" exitCode=0 Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.838043 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hv6hg" event={"ID":"4f95c60b-6f36-4750-b4c8-c386b242abc2","Type":"ContainerDied","Data":"33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e"} Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.838126 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hv6hg" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.839330 4995 scope.go:117] "RemoveContainer" containerID="33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.839313 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hv6hg" event={"ID":"4f95c60b-6f36-4750-b4c8-c386b242abc2","Type":"ContainerDied","Data":"989a3b81e8ab7bd350865ff9f982dd707eaf7874691cffd693fce9e03f705322"} Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.863897 4995 scope.go:117] "RemoveContainer" containerID="aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.891442 4995 scope.go:117] "RemoveContainer" containerID="52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.913231 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hv6hg"] Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.927777 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hv6hg"] Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.993281 4995 scope.go:117] "RemoveContainer" containerID="33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e" Jan 20 18:35:35 crc kubenswrapper[4995]: E0120 18:35:35.994121 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e\": container with ID starting with 33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e not found: ID does not exist" containerID="33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.994147 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e"} err="failed to get container status \"33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e\": rpc error: code = NotFound desc = could not find container \"33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e\": container with ID starting with 33557a8a2794adfc3306de5fb66d88b10bafd6010902d0b7cca57a97b0c9c67e not found: ID does not exist" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.994166 4995 scope.go:117] "RemoveContainer" containerID="aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4" Jan 20 18:35:35 crc kubenswrapper[4995]: E0120 18:35:35.994391 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4\": container with ID starting with aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4 not found: ID does not exist" containerID="aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.994432 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4"} err="failed to get container status \"aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4\": rpc error: code = NotFound desc = could not find 
container \"aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4\": container with ID starting with aaec4935af20cd2ffb723fe913fc3a780c55a0f582ed8631e39777aa63ec81b4 not found: ID does not exist" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.994459 4995 scope.go:117] "RemoveContainer" containerID="52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516" Jan 20 18:35:35 crc kubenswrapper[4995]: E0120 18:35:35.994715 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516\": container with ID starting with 52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516 not found: ID does not exist" containerID="52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516" Jan 20 18:35:35 crc kubenswrapper[4995]: I0120 18:35:35.994736 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516"} err="failed to get container status \"52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516\": rpc error: code = NotFound desc = could not find container \"52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516\": container with ID starting with 52c9e1d52956c9505358ac76e154a83f052cf33fba420c3e48c96b462e6c8516 not found: ID does not exist" Jan 20 18:35:36 crc kubenswrapper[4995]: I0120 18:35:36.002722 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" path="/var/lib/kubelet/pods/4f95c60b-6f36-4750-b4c8-c386b242abc2/volumes" Jan 20 18:36:30 crc kubenswrapper[4995]: I0120 18:36:30.571572 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:36:30 crc kubenswrapper[4995]: I0120 18:36:30.572285 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:37:00 crc kubenswrapper[4995]: I0120 18:37:00.571219 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:37:00 crc kubenswrapper[4995]: I0120 18:37:00.572961 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.508558 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9swzl"] Jan 20 18:37:25 crc kubenswrapper[4995]: E0120 18:37:25.510153 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" 
containerName="extract-utilities" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.510173 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="extract-utilities" Jan 20 18:37:25 crc kubenswrapper[4995]: E0120 18:37:25.510228 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="registry-server" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.510238 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="registry-server" Jan 20 18:37:25 crc kubenswrapper[4995]: E0120 18:37:25.510439 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="extract-content" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.510451 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="extract-content" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.511118 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f95c60b-6f36-4750-b4c8-c386b242abc2" containerName="registry-server" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.515429 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.517285 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9swzl"] Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.616954 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5bbk\" (UniqueName: \"kubernetes.io/projected/39a34cf8-36c4-4378-982f-2b9919bef10c-kube-api-access-r5bbk\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.617318 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-catalog-content\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.617373 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-utilities\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.719936 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5bbk\" (UniqueName: \"kubernetes.io/projected/39a34cf8-36c4-4378-982f-2b9919bef10c-kube-api-access-r5bbk\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.720155 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-catalog-content\") pod \"redhat-operators-9swzl\" (UID: 
\"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.720215 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-utilities\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.720849 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-utilities\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.722146 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-catalog-content\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.747965 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5bbk\" (UniqueName: \"kubernetes.io/projected/39a34cf8-36c4-4378-982f-2b9919bef10c-kube-api-access-r5bbk\") pod \"redhat-operators-9swzl\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:25 crc kubenswrapper[4995]: I0120 18:37:25.848660 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:26 crc kubenswrapper[4995]: I0120 18:37:26.336873 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9swzl"] Jan 20 18:37:27 crc kubenswrapper[4995]: I0120 18:37:27.096693 4995 generic.go:334] "Generic (PLEG): container finished" podID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerID="89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0" exitCode=0 Jan 20 18:37:27 crc kubenswrapper[4995]: I0120 18:37:27.096738 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerDied","Data":"89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0"} Jan 20 18:37:27 crc kubenswrapper[4995]: I0120 18:37:27.098261 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerStarted","Data":"eb61629723d9b216ab6f734e1db6f2c099d54e8256d3f421162ff01ac823f3a9"} Jan 20 18:37:27 crc kubenswrapper[4995]: I0120 18:37:27.098781 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 18:37:29 crc kubenswrapper[4995]: I0120 18:37:29.129012 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerStarted","Data":"464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627"} Jan 20 18:37:30 crc kubenswrapper[4995]: I0120 18:37:30.594720 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon 
Jan 20 18:37:30 crc kubenswrapper[4995]: I0120 18:37:30.594720 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 18:37:30 crc kubenswrapper[4995]: I0120 18:37:30.595037 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 18:37:30 crc kubenswrapper[4995]: I0120 18:37:30.595119 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 18:37:30 crc kubenswrapper[4995]: I0120 18:37:30.595672 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"25f43132eb3c862360a1c12d6849146bf7f3b1ac520a834e68280ed8d27c6a18"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 18:37:30 crc kubenswrapper[4995]: I0120 18:37:30.595736 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://25f43132eb3c862360a1c12d6849146bf7f3b1ac520a834e68280ed8d27c6a18" gracePeriod=600
Jan 20 18:37:31 crc kubenswrapper[4995]: I0120 18:37:31.160932 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="25f43132eb3c862360a1c12d6849146bf7f3b1ac520a834e68280ed8d27c6a18" exitCode=0
Jan 20 18:37:31 crc kubenswrapper[4995]: I0120 18:37:31.161022 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"25f43132eb3c862360a1c12d6849146bf7f3b1ac520a834e68280ed8d27c6a18"}
Jan 20 18:37:31 crc kubenswrapper[4995]: I0120 18:37:31.161126 4995 scope.go:117] "RemoveContainer" containerID="ca6ae446ed8057a892bf3f19fdd0ddbb9cdc010510217b4005b29aea27ff5202"
Jan 20 18:37:31 crc kubenswrapper[4995]: I0120 18:37:31.169655 4995 generic.go:334] "Generic (PLEG): container finished" podID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerID="464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627" exitCode=0
Jan 20 18:37:31 crc kubenswrapper[4995]: I0120 18:37:31.169740 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerDied","Data":"464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627"}
Jan 20 18:37:32 crc kubenswrapper[4995]: I0120 18:37:32.188103 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerStarted","Data":"ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468"}
Jan 20 18:37:32 crc kubenswrapper[4995]: I0120 18:37:32.191354 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796"}
Jan 20 18:37:32 crc kubenswrapper[4995]: I0120 18:37:32.224872 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9swzl" podStartSLOduration=2.644808993 podStartE2EDuration="7.224853711s" podCreationTimestamp="2026-01-20 18:37:25 +0000 UTC" firstStartedPulling="2026-01-20 18:37:27.098581885 +0000 UTC m=+7565.343186691" lastFinishedPulling="2026-01-20 18:37:31.678626583 +0000 UTC m=+7569.923231409" observedRunningTime="2026-01-20 18:37:32.215549931 +0000 UTC m=+7570.460154777" watchObservedRunningTime="2026-01-20 18:37:32.224853711 +0000 UTC m=+7570.469458517"
Jan 20 18:37:35 crc kubenswrapper[4995]: I0120 18:37:35.849678 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9swzl"
Jan 20 18:37:35 crc kubenswrapper[4995]: I0120 18:37:35.851321 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9swzl"
Jan 20 18:37:36 crc kubenswrapper[4995]: I0120 18:37:36.907403 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9swzl" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="registry-server" probeResult="failure" output=<
Jan 20 18:37:36 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s
Jan 20 18:37:36 crc kubenswrapper[4995]: >
Jan 20 18:37:45 crc kubenswrapper[4995]: I0120 18:37:45.899560 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9swzl"
Jan 20 18:37:45 crc kubenswrapper[4995]: I0120 18:37:45.953493 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9swzl"
Jan 20 18:37:47 crc kubenswrapper[4995]: I0120 18:37:47.479706 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9swzl"]
Jan 20 18:37:47 crc kubenswrapper[4995]: I0120 18:37:47.480541 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9swzl" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="registry-server" containerID="cri-o://ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468" gracePeriod=2
Jan 20 18:37:47 crc kubenswrapper[4995]: E0120 18:37:47.764347 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39a34cf8_36c4_4378_982f_2b9919bef10c.slice/crio-conmon-ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468.scope\": RecentStats: unable to find data in memory cache]"
Need to start a new one" pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.113471 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r5bbk\" (UniqueName: \"kubernetes.io/projected/39a34cf8-36c4-4378-982f-2b9919bef10c-kube-api-access-r5bbk\") pod \"39a34cf8-36c4-4378-982f-2b9919bef10c\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.113839 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-utilities\") pod \"39a34cf8-36c4-4378-982f-2b9919bef10c\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.114005 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-catalog-content\") pod \"39a34cf8-36c4-4378-982f-2b9919bef10c\" (UID: \"39a34cf8-36c4-4378-982f-2b9919bef10c\") " Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.114545 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-utilities" (OuterVolumeSpecName: "utilities") pod "39a34cf8-36c4-4378-982f-2b9919bef10c" (UID: "39a34cf8-36c4-4378-982f-2b9919bef10c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.115049 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.119508 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39a34cf8-36c4-4378-982f-2b9919bef10c-kube-api-access-r5bbk" (OuterVolumeSpecName: "kube-api-access-r5bbk") pod "39a34cf8-36c4-4378-982f-2b9919bef10c" (UID: "39a34cf8-36c4-4378-982f-2b9919bef10c"). InnerVolumeSpecName "kube-api-access-r5bbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.217375 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r5bbk\" (UniqueName: \"kubernetes.io/projected/39a34cf8-36c4-4378-982f-2b9919bef10c-kube-api-access-r5bbk\") on node \"crc\" DevicePath \"\"" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.266287 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39a34cf8-36c4-4378-982f-2b9919bef10c" (UID: "39a34cf8-36c4-4378-982f-2b9919bef10c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.319454 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39a34cf8-36c4-4378-982f-2b9919bef10c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.359807 4995 generic.go:334] "Generic (PLEG): container finished" podID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerID="ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468" exitCode=0 Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.359860 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerDied","Data":"ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468"} Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.359924 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9swzl" event={"ID":"39a34cf8-36c4-4378-982f-2b9919bef10c","Type":"ContainerDied","Data":"eb61629723d9b216ab6f734e1db6f2c099d54e8256d3f421162ff01ac823f3a9"} Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.359939 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9swzl" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.359951 4995 scope.go:117] "RemoveContainer" containerID="ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.397142 4995 scope.go:117] "RemoveContainer" containerID="464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.431034 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9swzl"] Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.446773 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9swzl"] Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.452298 4995 scope.go:117] "RemoveContainer" containerID="89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.493655 4995 scope.go:117] "RemoveContainer" containerID="ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468" Jan 20 18:37:48 crc kubenswrapper[4995]: E0120 18:37:48.494210 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468\": container with ID starting with ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468 not found: ID does not exist" containerID="ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.494254 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468"} err="failed to get container status \"ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468\": rpc error: code = NotFound desc = could not find container \"ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468\": container with ID starting with ca10ddf2b9c350d2043120d2ef7a1e2ac8b350952cfbd3bf22ee25822cbca468 not found: ID does not exist" Jan 20 18:37:48 crc 
kubenswrapper[4995]: I0120 18:37:48.494285 4995 scope.go:117] "RemoveContainer" containerID="464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627" Jan 20 18:37:48 crc kubenswrapper[4995]: E0120 18:37:48.494797 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627\": container with ID starting with 464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627 not found: ID does not exist" containerID="464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.494821 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627"} err="failed to get container status \"464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627\": rpc error: code = NotFound desc = could not find container \"464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627\": container with ID starting with 464c2bbf4185c11d745ea2934468d6000d4f855495fbaa57cd114f25cdf96627 not found: ID does not exist" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.494838 4995 scope.go:117] "RemoveContainer" containerID="89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0" Jan 20 18:37:48 crc kubenswrapper[4995]: E0120 18:37:48.495581 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0\": container with ID starting with 89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0 not found: ID does not exist" containerID="89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0" Jan 20 18:37:48 crc kubenswrapper[4995]: I0120 18:37:48.495627 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0"} err="failed to get container status \"89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0\": rpc error: code = NotFound desc = could not find container \"89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0\": container with ID starting with 89ecce2d073a84f22cc0a98ff46c799b522c031f7a855ddac35a3018d0af98c0 not found: ID does not exist" Jan 20 18:37:50 crc kubenswrapper[4995]: I0120 18:37:50.002950 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" path="/var/lib/kubelet/pods/39a34cf8-36c4-4378-982f-2b9919bef10c/volumes" Jan 20 18:40:00 crc kubenswrapper[4995]: I0120 18:40:00.572234 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:40:00 crc kubenswrapper[4995]: I0120 18:40:00.572984 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:40:30 crc kubenswrapper[4995]: I0120 18:40:30.572167 4995 patch_prober.go:28] interesting 
pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:40:30 crc kubenswrapper[4995]: I0120 18:40:30.572765 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:41:00 crc kubenswrapper[4995]: I0120 18:41:00.572319 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:41:00 crc kubenswrapper[4995]: I0120 18:41:00.572932 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:41:00 crc kubenswrapper[4995]: I0120 18:41:00.572991 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:41:00 crc kubenswrapper[4995]: I0120 18:41:00.573983 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:41:00 crc kubenswrapper[4995]: I0120 18:41:00.574069 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" gracePeriod=600 Jan 20 18:41:00 crc kubenswrapper[4995]: E0120 18:41:00.695111 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:41:01 crc kubenswrapper[4995]: I0120 18:41:01.457594 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" exitCode=0 Jan 20 18:41:01 crc kubenswrapper[4995]: I0120 18:41:01.457849 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796"} Jan 20 18:41:01 crc kubenswrapper[4995]: 
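The recurring machine-config-daemon failures above are an HTTP liveness check against 127.0.0.1:8798/health returning connection refused every 30 seconds, until the kubelet declares the container unhealthy, logs "failed liveness probe, will be restarted", and kills it with the 600-second grace period. A minimal sketch of a probe shaped like that check, assuming the k8s.io/api types (the failure threshold is illustrative; the host, path, port, and 30 s period are read off the log):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        liveness := corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                HTTPGet: &corev1.HTTPGetAction{
                    Host: "127.0.0.1", // the probe in the log hits localhost directly
                    Path: "/health",
                    Port: intstr.FromInt(8798),
                },
            },
            PeriodSeconds:    30, // the failures above arrive 30 s apart
            FailureThreshold: 3,  // illustrative
        }
        fmt.Printf("GET http://%s:%s%s\n",
            liveness.HTTPGet.Host, liveness.HTTPGet.Port.String(), liveness.HTTPGet.Path)
    }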
Jan 20 18:41:01 crc kubenswrapper[4995]: I0120 18:41:01.457986 4995 scope.go:117] "RemoveContainer" containerID="25f43132eb3c862360a1c12d6849146bf7f3b1ac520a834e68280ed8d27c6a18"
Jan 20 18:41:01 crc kubenswrapper[4995]: I0120 18:41:01.458941 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796"
Jan 20 18:41:01 crc kubenswrapper[4995]: E0120 18:41:01.459405 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:41:14 crc kubenswrapper[4995]: I0120 18:41:14.989270 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796"
Jan 20 18:41:14 crc kubenswrapper[4995]: E0120 18:41:14.990077 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.477318 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-mblc8/must-gather-x8rsq"]
Jan 20 18:41:24 crc kubenswrapper[4995]: E0120 18:41:24.478213 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="extract-content"
Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.478228 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="extract-content"
Jan 20 18:41:24 crc kubenswrapper[4995]: E0120 18:41:24.478259 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="registry-server"
Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.478268 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="registry-server"
Jan 20 18:41:24 crc kubenswrapper[4995]: E0120 18:41:24.478286 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="extract-utilities"
Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.478295 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="extract-utilities"
Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.478485 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="39a34cf8-36c4-4378-982f-2b9919bef10c" containerName="registry-server"
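After the restarted daemon dies again, the pod workers refuse to start it until a backoff expires; the kubelet's restart backoff starts at 10 s, doubles per consecutive crash, and is capped at 5 m, which is the "back-off 5m0s" in the CrashLoopBackOff errors above (the repeats at 18:41:14, 18:41:29, and later are the sync loop re-evaluating, not the backoff resetting). A small illustration of that doubling-with-cap policy, not the kubelet's actual implementation:

    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopBackOff sketches the policy: 10s base, doubled per consecutive
    // crash, capped at 5m ("back-off 5m0s" in the errors above).
    func crashLoopBackOff(restarts int) time.Duration {
        const base = 10 * time.Second
        const maxDelay = 5 * time.Minute
        d := base
        for i := 0; i < restarts; i++ {
            d *= 2
            if d > maxDelay {
                return maxDelay
            }
        }
        return d
    }

    func main() {
        for r := 0; r <= 6; r++ {
            // prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s
            fmt.Printf("restarts=%d backoff=%s\n", r, crashLoopBackOff(r))
        }
    }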
Need to start a new one" pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.483901 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-mblc8"/"default-dockercfg-g785j" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.483982 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-mblc8"/"openshift-service-ca.crt" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.484037 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-mblc8"/"kube-root-ca.crt" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.502135 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-mblc8/must-gather-x8rsq"] Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.613323 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffbe0b40-4efc-48e4-a2b4-00c4ccab657a-must-gather-output\") pod \"must-gather-x8rsq\" (UID: \"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a\") " pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.613839 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sdrp\" (UniqueName: \"kubernetes.io/projected/ffbe0b40-4efc-48e4-a2b4-00c4ccab657a-kube-api-access-7sdrp\") pod \"must-gather-x8rsq\" (UID: \"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a\") " pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.715768 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sdrp\" (UniqueName: \"kubernetes.io/projected/ffbe0b40-4efc-48e4-a2b4-00c4ccab657a-kube-api-access-7sdrp\") pod \"must-gather-x8rsq\" (UID: \"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a\") " pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.715916 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffbe0b40-4efc-48e4-a2b4-00c4ccab657a-must-gather-output\") pod \"must-gather-x8rsq\" (UID: \"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a\") " pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.716475 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffbe0b40-4efc-48e4-a2b4-00c4ccab657a-must-gather-output\") pod \"must-gather-x8rsq\" (UID: \"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a\") " pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.732439 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sdrp\" (UniqueName: \"kubernetes.io/projected/ffbe0b40-4efc-48e4-a2b4-00c4ccab657a-kube-api-access-7sdrp\") pod \"must-gather-x8rsq\" (UID: \"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a\") " pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:24 crc kubenswrapper[4995]: I0120 18:41:24.801443 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/must-gather-x8rsq" Jan 20 18:41:25 crc kubenswrapper[4995]: I0120 18:41:25.338220 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-mblc8/must-gather-x8rsq"] Jan 20 18:41:25 crc kubenswrapper[4995]: W0120 18:41:25.341554 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffbe0b40_4efc_48e4_a2b4_00c4ccab657a.slice/crio-f9e62aba6dc8733b1cbd49006c2e00dec5afeecd1c0c044c896a7f71eefaf2a1 WatchSource:0}: Error finding container f9e62aba6dc8733b1cbd49006c2e00dec5afeecd1c0c044c896a7f71eefaf2a1: Status 404 returned error can't find the container with id f9e62aba6dc8733b1cbd49006c2e00dec5afeecd1c0c044c896a7f71eefaf2a1 Jan 20 18:41:25 crc kubenswrapper[4995]: I0120 18:41:25.768067 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/must-gather-x8rsq" event={"ID":"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a","Type":"ContainerStarted","Data":"ba8bd8baef26bbaee826ebd57116ca2de76adda9f9e09c3d39ab5281e780b2f8"} Jan 20 18:41:25 crc kubenswrapper[4995]: I0120 18:41:25.768363 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/must-gather-x8rsq" event={"ID":"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a","Type":"ContainerStarted","Data":"f9e62aba6dc8733b1cbd49006c2e00dec5afeecd1c0c044c896a7f71eefaf2a1"} Jan 20 18:41:26 crc kubenswrapper[4995]: I0120 18:41:26.780494 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/must-gather-x8rsq" event={"ID":"ffbe0b40-4efc-48e4-a2b4-00c4ccab657a","Type":"ContainerStarted","Data":"ea7587d3d19a2799ce652e95c2dc99c45ea9c2e5e6bd862d2368b9fda6e2562d"} Jan 20 18:41:26 crc kubenswrapper[4995]: I0120 18:41:26.799456 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-mblc8/must-gather-x8rsq" podStartSLOduration=2.7994319340000002 podStartE2EDuration="2.799431934s" podCreationTimestamp="2026-01-20 18:41:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 18:41:26.793610068 +0000 UTC m=+7805.038214874" watchObservedRunningTime="2026-01-20 18:41:26.799431934 +0000 UTC m=+7805.044036740" Jan 20 18:41:29 crc kubenswrapper[4995]: I0120 18:41:29.971885 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-mblc8/crc-debug-mffrp"] Jan 20 18:41:29 crc kubenswrapper[4995]: I0120 18:41:29.973461 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:29 crc kubenswrapper[4995]: I0120 18:41:29.990312 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:41:29 crc kubenswrapper[4995]: E0120 18:41:29.990506 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.021667 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wr66\" (UniqueName: \"kubernetes.io/projected/171b8ac0-9936-43fc-a42d-a685a28f7cc2-kube-api-access-5wr66\") pod \"crc-debug-mffrp\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.021956 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/171b8ac0-9936-43fc-a42d-a685a28f7cc2-host\") pod \"crc-debug-mffrp\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.124000 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/171b8ac0-9936-43fc-a42d-a685a28f7cc2-host\") pod \"crc-debug-mffrp\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.124099 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wr66\" (UniqueName: \"kubernetes.io/projected/171b8ac0-9936-43fc-a42d-a685a28f7cc2-kube-api-access-5wr66\") pod \"crc-debug-mffrp\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.124643 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/171b8ac0-9936-43fc-a42d-a685a28f7cc2-host\") pod \"crc-debug-mffrp\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.150271 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wr66\" (UniqueName: \"kubernetes.io/projected/171b8ac0-9936-43fc-a42d-a685a28f7cc2-kube-api-access-5wr66\") pod \"crc-debug-mffrp\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.296306 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:41:30 crc kubenswrapper[4995]: W0120 18:41:30.328838 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod171b8ac0_9936_43fc_a42d_a685a28f7cc2.slice/crio-7324b967f21800d3d3d9688dabe7b623a8409498e427fc500973d10c45e31886 WatchSource:0}: Error finding container 7324b967f21800d3d3d9688dabe7b623a8409498e427fc500973d10c45e31886: Status 404 returned error can't find the container with id 7324b967f21800d3d3d9688dabe7b623a8409498e427fc500973d10c45e31886 Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.833288 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-mffrp" event={"ID":"171b8ac0-9936-43fc-a42d-a685a28f7cc2","Type":"ContainerStarted","Data":"f4c68c88876aa7b959c7838f2308a04229b4910c00f8af68d96bb469ff9285c3"} Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.834516 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-mffrp" event={"ID":"171b8ac0-9936-43fc-a42d-a685a28f7cc2","Type":"ContainerStarted","Data":"7324b967f21800d3d3d9688dabe7b623a8409498e427fc500973d10c45e31886"} Jan 20 18:41:30 crc kubenswrapper[4995]: I0120 18:41:30.849042 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-mblc8/crc-debug-mffrp" podStartSLOduration=1.849014978 podStartE2EDuration="1.849014978s" podCreationTimestamp="2026-01-20 18:41:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 18:41:30.84574867 +0000 UTC m=+7809.090353486" watchObservedRunningTime="2026-01-20 18:41:30.849014978 +0000 UTC m=+7809.093619814" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.631715 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-674779b598-44vdg_87360161-2c16-453b-bfeb-649cd107fdf0/barbican-api-log/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.640276 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-674779b598-44vdg_87360161-2c16-453b-bfeb-649cd107fdf0/barbican-api/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.697957 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-76684d5cb6-ln2nh_82750a54-2446-49e7-8251-7ae6f228dc49/barbican-keystone-listener-log/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.710333 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-76684d5cb6-ln2nh_82750a54-2446-49e7-8251-7ae6f228dc49/barbican-keystone-listener/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.726453 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b68c6cc67-mvcbt_2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5/barbican-worker-log/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.733995 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-b68c6cc67-mvcbt_2d179ee5-2bdf-4416-bf94-e0a14b5d5cf5/barbican-worker/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.796342 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-g484b_7426fa32-40ee-4b5e-9d5a-962505929c91/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:32 crc 
kubenswrapper[4995]: I0120 18:41:32.854033 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/ceilometer-central-agent/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.899360 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/ceilometer-notification-agent/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.915358 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/sg-core/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.950557 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_31630944-4dd8-4460-b8b3-d87157e2a0ef/proxy-httpd/0.log" Jan 20 18:41:32 crc kubenswrapper[4995]: I0120 18:41:32.986388 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_59abbdb2-429a-473e-ae6b-8f731b6cf17d/cinder-api-log/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.100471 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_59abbdb2-429a-473e-ae6b-8f731b6cf17d/cinder-api/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.158717 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_600e23cc-0af2-4f67-a17b-a69f4753f7f5/cinder-scheduler/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.228332 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_600e23cc-0af2-4f67-a17b-a69f4753f7f5/probe/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.254126 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-t744h_6d2d90c1-a32f-4ec1-82e9-4d4440542e43/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.280967 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-j47qj_b46e1f63-68f8-4cb0-835d-5d35ece39037/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.459829 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-b2hmj_4d44782e-c760-4297-8d8b-8e87526ffbdb/dnsmasq-dns/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.465070 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-b2hmj_4d44782e-c760-4297-8d8b-8e87526ffbdb/init/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.490941 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-7mlhq_d73cf0b7-6fb8-4b4c-b6bd-acb174f44890/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.501977 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4a7fd66d-0211-429d-8dfa-7a29ca98ab51/glance-log/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.530125 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_4a7fd66d-0211-429d-8dfa-7a29ca98ab51/glance-httpd/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.551343 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-internal-api-0_b40b3bf2-fecd-4b7b-8110-7f15651792f3/glance-log/0.log" Jan 20 18:41:33 crc kubenswrapper[4995]: I0120 18:41:33.590567 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b40b3bf2-fecd-4b7b-8110-7f15651792f3/glance-httpd/0.log" Jan 20 18:41:34 crc kubenswrapper[4995]: I0120 18:41:34.623773 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cd588cc5b-pmhlg_83a7df1c-c59a-4a4c-b34d-df9fc6711aea/horizon-log/0.log" Jan 20 18:41:34 crc kubenswrapper[4995]: I0120 18:41:34.735756 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7cd588cc5b-pmhlg_83a7df1c-c59a-4a4c-b34d-df9fc6711aea/horizon/0.log" Jan 20 18:41:34 crc kubenswrapper[4995]: I0120 18:41:34.771221 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-m4z4z_f8079eec-4ec7-4979-8cd9-531c61418782/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:34 crc kubenswrapper[4995]: I0120 18:41:34.800718 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-rnrwr_f6888ab8-4be9-45c2-b50d-46927fd64cba/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:35 crc kubenswrapper[4995]: I0120 18:41:35.267090 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7f99b88f98-w6ztm_d2343e74-3182-46e7-b4d2-7d9c35964fab/keystone-api/0.log" Jan 20 18:41:35 crc kubenswrapper[4995]: I0120 18:41:35.274198 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29482141-gpst2_f2050c7c-ffc8-4deb-89d8-f6cc0ee15601/keystone-cron/0.log" Jan 20 18:41:35 crc kubenswrapper[4995]: I0120 18:41:35.285242 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29482201-zttwb_a190e372-ffc1-43d0-bb58-b44814c479ed/keystone-cron/0.log" Jan 20 18:41:35 crc kubenswrapper[4995]: I0120 18:41:35.297199 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_0e5570d0-a1c7-46f5-a5f6-529ad06cf05f/kube-state-metrics/0.log" Jan 20 18:41:35 crc kubenswrapper[4995]: I0120 18:41:35.346475 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-pwrsz_34c57bed-2d89-4ce3-9613-eb3ec4fb222b/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:44 crc kubenswrapper[4995]: I0120 18:41:44.989136 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:41:44 crc kubenswrapper[4995]: E0120 18:41:44.989747 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:41:52 crc kubenswrapper[4995]: I0120 18:41:52.337524 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_f951f50c-486d-4038-a43a-4d40fa1812de/memcached/0.log" Jan 20 18:41:52 crc kubenswrapper[4995]: I0120 18:41:52.542300 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_neutron-7857b9874f-85h9n_121cce9d-e190-44bf-b332-7b268c2ffd26/neutron-api/0.log" Jan 20 18:41:52 crc kubenswrapper[4995]: I0120 18:41:52.653865 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7857b9874f-85h9n_121cce9d-e190-44bf-b332-7b268c2ffd26/neutron-httpd/0.log" Jan 20 18:41:52 crc kubenswrapper[4995]: I0120 18:41:52.684922 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-64jxx_1fd4a738-bf40-4e76-9ee2-79a8042e7c07/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:53 crc kubenswrapper[4995]: I0120 18:41:53.329898 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d6dd58d8-2dec-4e9a-b9b7-78f585378448/nova-api-log/0.log" Jan 20 18:41:54 crc kubenswrapper[4995]: I0120 18:41:54.492296 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_d6dd58d8-2dec-4e9a-b9b7-78f585378448/nova-api-api/0.log" Jan 20 18:41:54 crc kubenswrapper[4995]: I0120 18:41:54.696063 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_18edb0dd-f0be-4f0e-b860-cf6cc5b67745/nova-cell0-conductor-conductor/0.log" Jan 20 18:41:54 crc kubenswrapper[4995]: I0120 18:41:54.818809 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_68ec0231-a7a1-45be-afbf-e66cd2a68d38/nova-cell1-conductor-conductor/0.log" Jan 20 18:41:54 crc kubenswrapper[4995]: I0120 18:41:54.966701 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_2a0df207-0ec6-420e-9f84-7ea1d4e6b469/nova-cell1-novncproxy-novncproxy/0.log" Jan 20 18:41:55 crc kubenswrapper[4995]: I0120 18:41:55.024311 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-f6vqc_a463f304-1432-497f-9f19-3cd3b4d05da2/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:55 crc kubenswrapper[4995]: I0120 18:41:55.091376 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4a499d87-fe94-4606-85e0-a225b12773f7/nova-metadata-log/0.log" Jan 20 18:41:57 crc kubenswrapper[4995]: I0120 18:41:57.194728 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/controller/0.log" Jan 20 18:41:57 crc kubenswrapper[4995]: I0120 18:41:57.204165 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/kube-rbac-proxy/0.log" Jan 20 18:41:57 crc kubenswrapper[4995]: I0120 18:41:57.249675 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/controller/0.log" Jan 20 18:41:57 crc kubenswrapper[4995]: I0120 18:41:57.758965 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_4a499d87-fe94-4606-85e0-a225b12773f7/nova-metadata-metadata/0.log" Jan 20 18:41:57 crc kubenswrapper[4995]: I0120 18:41:57.991336 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:41:57 crc kubenswrapper[4995]: E0120 18:41:57.991580 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.139439 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_b776e369-c664-4e5e-a256-b5a1725c0142/nova-scheduler-scheduler/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.179282 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a68274bb-aba1-4c92-85ae-8e043d5ac325/galera/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.193698 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_a68274bb-aba1-4c92-85ae-8e043d5ac325/mysql-bootstrap/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.224403 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6/galera/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.241576 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_e320668b-9ab9-4ab4-b48b-9e6b4b52e2e6/mysql-bootstrap/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.259379 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_82097345-279c-4f86-ad0d-29cd82acf859/openstackclient/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.276638 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-vfjkw_5e655fe2-263f-4d77-b9fd-af0528012527/openstack-network-exporter/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.300784 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q9nkf_028bd686-8a70-4866-968f-c29ab470e44c/ovsdb-server/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.311034 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q9nkf_028bd686-8a70-4866-968f-c29ab470e44c/ovs-vswitchd/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.319318 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-q9nkf_028bd686-8a70-4866-968f-c29ab470e44c/ovsdb-server-init/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.342560 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-spc7x_54be3683-2d75-43fd-8301-e05b2a5103cc/ovn-controller/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.413846 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-nwtzl_2c510d64-d6b7-41c0-a293-4528282415ec/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.428176 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c79dd22d-f0b7-4102-a740-1e5c88a5a548/ovn-northd/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.434713 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c79dd22d-f0b7-4102-a740-1e5c88a5a548/openstack-network-exporter/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.454228 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_929cb9df-f5d9-4b0b-972c-5b79b6e28ab8/ovsdbserver-nb/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.460204 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_929cb9df-f5d9-4b0b-972c-5b79b6e28ab8/openstack-network-exporter/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.484004 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9283bc9e-66ee-4ded-b64e-3bdca7f112b4/ovsdbserver-sb/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.497862 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_9283bc9e-66ee-4ded-b64e-3bdca7f112b4/openstack-network-exporter/0.log" Jan 20 18:41:58 crc kubenswrapper[4995]: I0120 18:41:58.862938 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-679d748c9b-mrbbx_ff10efe7-680b-4d4a-a950-e2a7dfbd24a1/placement-log/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.089854 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-679d748c9b-mrbbx_ff10efe7-680b-4d4a-a950-e2a7dfbd24a1/placement-api/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.117207 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/prometheus/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.124036 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/config-reloader/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.131944 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/thanos-sidecar/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.140909 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_69015998-1253-4181-99d4-1ea45f6ca788/init-config-reloader/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.192180 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a10b59cc-41b2-49f9-ba12-2bdb82b568f7/rabbitmq/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.197259 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_a10b59cc-41b2-49f9-ba12-2bdb82b568f7/setup-container/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.228401 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cfa14e5d-418a-4eed-96fe-fef4b2a88543/rabbitmq/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.234248 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_cfa14e5d-418a-4eed-96fe-fef4b2a88543/setup-container/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.251009 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-ffjt7_152fb470-c7e5-4e8f-86b4-5e816d021183/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.260682 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-tf4fv_7e4edc52-6ba0-441c-abeb-a7f17b0cb31f/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:59 crc 
kubenswrapper[4995]: I0120 18:41:59.274526 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-jvxbz_0b6494de-6466-4ecf-99d4-e410e3829130/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.297047 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-46zxc_bcd29c3f-dbeb-439f-98f8-7d4aa39597d4/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.313713 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-kr7jc_341c1f6f-c1d4-49a7-8980-7a6f9df0c216/ssh-known-hosts-edpm-deployment/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.694551 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f5d884999-jxjqt_6da8401d-a15a-4ff6-ab0f-11cbafff0855/proxy-httpd/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.719000 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6f5d884999-jxjqt_6da8401d-a15a-4ff6-ab0f-11cbafff0855/proxy-server/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.727201 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-nmjp6_f955d94a-612b-4962-9745-ac012f2398b2/swift-ring-rebalance/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.786740 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-server/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.854423 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-replicator/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.861390 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-auditor/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.869045 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/account-reaper/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.879410 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-server/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.951975 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-replicator/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.959678 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-auditor/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.969606 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/container-updater/0.log" Jan 20 18:41:59 crc kubenswrapper[4995]: I0120 18:41:59.976061 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-server/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.034062 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-replicator/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.074028 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.077428 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-auditor/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.085814 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/reloader/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.087809 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-updater/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.090263 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr-metrics/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.097843 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.101451 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/object-expirer/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.107562 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/rsync/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.107859 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy-frr/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.114586 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-frr-files/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.114663 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_3f11d1ef-8720-4a15-91b7-2ad1602194f7/swift-recon-cron/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.122898 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-reloader/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.130235 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-metrics/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.143131 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-zwjrk_473c4019-d6be-4420-a678-d18999ddbe1c/frr-k8s-webhook-server/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.169401 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6dd7779458-w2rt4_8b17b582-a06b-4ece-b513-7f826c838f6f/manager/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.181978 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5c6d4b5599-f8tsv_7fae7627-5782-4525-ba17-4507d15764cd/webhook-server/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.183641 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-zg6w7_896e00af-dc03-4ed9-b3e7-314eaf50d3b9/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.218979 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_356ca6c0-8604-40b3-b965-af9225ea185f/tempest-tests-tempest-tests-runner/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.228485 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_d6171ebe-412f-4cf2-839e-785eeeaf714b/test-operator-logs-container/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.244906 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rqhp5_78a82208-d087-4194-ab1e-c3df98c3321e/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.821853 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/speaker/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.831705 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/kube-rbac-proxy/0.log" Jan 20 18:42:00 crc kubenswrapper[4995]: I0120 18:42:00.915518 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_e797cd4f-fdf8-485b-94e6-2a1105dedb71/watcher-api-log/0.log" Jan 20 18:42:06 crc kubenswrapper[4995]: I0120 18:42:06.140974 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_e797cd4f-fdf8-485b-94e6-2a1105dedb71/watcher-api/0.log" Jan 20 18:42:06 crc kubenswrapper[4995]: I0120 18:42:06.412637 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_fe959b99-aa94-41d7-aefa-6e6803a337cf/watcher-applier/0.log" Jan 20 18:42:07 crc kubenswrapper[4995]: I0120 18:42:07.149886 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_f3461cc0-9ae2-4e3b-a0ba-070e6273cba0/watcher-decision-engine/0.log" Jan 20 18:42:08 crc kubenswrapper[4995]: I0120 18:42:08.989771 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:42:08 crc kubenswrapper[4995]: E0120 18:42:08.990429 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:42:14 crc kubenswrapper[4995]: I0120 18:42:14.647038 4995 generic.go:334] "Generic (PLEG): container finished" podID="171b8ac0-9936-43fc-a42d-a685a28f7cc2" containerID="f4c68c88876aa7b959c7838f2308a04229b4910c00f8af68d96bb469ff9285c3" exitCode=0 Jan 20 18:42:14 crc kubenswrapper[4995]: I0120 18:42:14.647184 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-must-gather-mblc8/crc-debug-mffrp" event={"ID":"171b8ac0-9936-43fc-a42d-a685a28f7cc2","Type":"ContainerDied","Data":"f4c68c88876aa7b959c7838f2308a04229b4910c00f8af68d96bb469ff9285c3"} Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.782943 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.818465 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-mblc8/crc-debug-mffrp"] Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.832769 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-mblc8/crc-debug-mffrp"] Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.950654 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/171b8ac0-9936-43fc-a42d-a685a28f7cc2-host\") pod \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.950721 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wr66\" (UniqueName: \"kubernetes.io/projected/171b8ac0-9936-43fc-a42d-a685a28f7cc2-kube-api-access-5wr66\") pod \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\" (UID: \"171b8ac0-9936-43fc-a42d-a685a28f7cc2\") " Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.951734 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/171b8ac0-9936-43fc-a42d-a685a28f7cc2-host" (OuterVolumeSpecName: "host") pod "171b8ac0-9936-43fc-a42d-a685a28f7cc2" (UID: "171b8ac0-9936-43fc-a42d-a685a28f7cc2"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 18:42:15 crc kubenswrapper[4995]: I0120 18:42:15.956774 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/171b8ac0-9936-43fc-a42d-a685a28f7cc2-kube-api-access-5wr66" (OuterVolumeSpecName: "kube-api-access-5wr66") pod "171b8ac0-9936-43fc-a42d-a685a28f7cc2" (UID: "171b8ac0-9936-43fc-a42d-a685a28f7cc2"). InnerVolumeSpecName "kube-api-access-5wr66". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:42:16 crc kubenswrapper[4995]: I0120 18:42:16.012557 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="171b8ac0-9936-43fc-a42d-a685a28f7cc2" path="/var/lib/kubelet/pods/171b8ac0-9936-43fc-a42d-a685a28f7cc2/volumes" Jan 20 18:42:16 crc kubenswrapper[4995]: I0120 18:42:16.053267 4995 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/171b8ac0-9936-43fc-a42d-a685a28f7cc2-host\") on node \"crc\" DevicePath \"\"" Jan 20 18:42:16 crc kubenswrapper[4995]: I0120 18:42:16.053318 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wr66\" (UniqueName: \"kubernetes.io/projected/171b8ac0-9936-43fc-a42d-a685a28f7cc2-kube-api-access-5wr66\") on node \"crc\" DevicePath \"\"" Jan 20 18:42:16 crc kubenswrapper[4995]: I0120 18:42:16.671204 4995 scope.go:117] "RemoveContainer" containerID="f4c68c88876aa7b959c7838f2308a04229b4910c00f8af68d96bb469ff9285c3" Jan 20 18:42:16 crc kubenswrapper[4995]: I0120 18:42:16.671230 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-mffrp" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.041712 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-mblc8/crc-debug-4v265"] Jan 20 18:42:17 crc kubenswrapper[4995]: E0120 18:42:17.042231 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="171b8ac0-9936-43fc-a42d-a685a28f7cc2" containerName="container-00" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.042248 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="171b8ac0-9936-43fc-a42d-a685a28f7cc2" containerName="container-00" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.042486 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="171b8ac0-9936-43fc-a42d-a685a28f7cc2" containerName="container-00" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.043343 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.178610 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9ftp\" (UniqueName: \"kubernetes.io/projected/91c00d30-e88b-474a-b024-2059d1caca5c-kube-api-access-h9ftp\") pod \"crc-debug-4v265\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.178957 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/91c00d30-e88b-474a-b024-2059d1caca5c-host\") pod \"crc-debug-4v265\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.282322 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9ftp\" (UniqueName: \"kubernetes.io/projected/91c00d30-e88b-474a-b024-2059d1caca5c-kube-api-access-h9ftp\") pod \"crc-debug-4v265\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.282715 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/91c00d30-e88b-474a-b024-2059d1caca5c-host\") pod \"crc-debug-4v265\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.282899 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/91c00d30-e88b-474a-b024-2059d1caca5c-host\") pod \"crc-debug-4v265\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.303405 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9ftp\" (UniqueName: \"kubernetes.io/projected/91c00d30-e88b-474a-b024-2059d1caca5c-kube-api-access-h9ftp\") pod \"crc-debug-4v265\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.360620 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.687262 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-4v265" event={"ID":"91c00d30-e88b-474a-b024-2059d1caca5c","Type":"ContainerStarted","Data":"53b946962438b1dfc95ad08a73d1777a6de3f2c50b110ca6708c4e049701e11a"} Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.687789 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-4v265" event={"ID":"91c00d30-e88b-474a-b024-2059d1caca5c","Type":"ContainerStarted","Data":"6900e8c75a03a594a650d1afb2b786ae3bbf0ba3e606651e084ec21f028f3b64"} Jan 20 18:42:17 crc kubenswrapper[4995]: I0120 18:42:17.710473 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-mblc8/crc-debug-4v265" podStartSLOduration=0.710446339 podStartE2EDuration="710.446339ms" podCreationTimestamp="2026-01-20 18:42:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 18:42:17.700951123 +0000 UTC m=+7855.945555939" watchObservedRunningTime="2026-01-20 18:42:17.710446339 +0000 UTC m=+7855.955051155" Jan 20 18:42:18 crc kubenswrapper[4995]: I0120 18:42:18.702862 4995 generic.go:334] "Generic (PLEG): container finished" podID="91c00d30-e88b-474a-b024-2059d1caca5c" containerID="53b946962438b1dfc95ad08a73d1777a6de3f2c50b110ca6708c4e049701e11a" exitCode=0 Jan 20 18:42:18 crc kubenswrapper[4995]: I0120 18:42:18.702904 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-4v265" event={"ID":"91c00d30-e88b-474a-b024-2059d1caca5c","Type":"ContainerDied","Data":"53b946962438b1dfc95ad08a73d1777a6de3f2c50b110ca6708c4e049701e11a"} Jan 20 18:42:19 crc kubenswrapper[4995]: I0120 18:42:19.832291 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:19 crc kubenswrapper[4995]: I0120 18:42:19.926671 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9ftp\" (UniqueName: \"kubernetes.io/projected/91c00d30-e88b-474a-b024-2059d1caca5c-kube-api-access-h9ftp\") pod \"91c00d30-e88b-474a-b024-2059d1caca5c\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " Jan 20 18:42:19 crc kubenswrapper[4995]: I0120 18:42:19.926814 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/91c00d30-e88b-474a-b024-2059d1caca5c-host\") pod \"91c00d30-e88b-474a-b024-2059d1caca5c\" (UID: \"91c00d30-e88b-474a-b024-2059d1caca5c\") " Jan 20 18:42:19 crc kubenswrapper[4995]: I0120 18:42:19.926979 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/91c00d30-e88b-474a-b024-2059d1caca5c-host" (OuterVolumeSpecName: "host") pod "91c00d30-e88b-474a-b024-2059d1caca5c" (UID: "91c00d30-e88b-474a-b024-2059d1caca5c"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 18:42:19 crc kubenswrapper[4995]: I0120 18:42:19.927409 4995 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/91c00d30-e88b-474a-b024-2059d1caca5c-host\") on node \"crc\" DevicePath \"\"" Jan 20 18:42:19 crc kubenswrapper[4995]: I0120 18:42:19.932033 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91c00d30-e88b-474a-b024-2059d1caca5c-kube-api-access-h9ftp" (OuterVolumeSpecName: "kube-api-access-h9ftp") pod "91c00d30-e88b-474a-b024-2059d1caca5c" (UID: "91c00d30-e88b-474a-b024-2059d1caca5c"). InnerVolumeSpecName "kube-api-access-h9ftp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:42:20 crc kubenswrapper[4995]: I0120 18:42:20.029036 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9ftp\" (UniqueName: \"kubernetes.io/projected/91c00d30-e88b-474a-b024-2059d1caca5c-kube-api-access-h9ftp\") on node \"crc\" DevicePath \"\"" Jan 20 18:42:20 crc kubenswrapper[4995]: I0120 18:42:20.572353 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-mblc8/crc-debug-4v265"] Jan 20 18:42:20 crc kubenswrapper[4995]: I0120 18:42:20.587399 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-mblc8/crc-debug-4v265"] Jan 20 18:42:20 crc kubenswrapper[4995]: I0120 18:42:20.727063 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6900e8c75a03a594a650d1afb2b786ae3bbf0ba3e606651e084ec21f028f3b64" Jan 20 18:42:20 crc kubenswrapper[4995]: I0120 18:42:20.727177 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-4v265" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.756955 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-mblc8/crc-debug-smv5m"] Jan 20 18:42:21 crc kubenswrapper[4995]: E0120 18:42:21.758516 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c00d30-e88b-474a-b024-2059d1caca5c" containerName="container-00" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.758598 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c00d30-e88b-474a-b024-2059d1caca5c" containerName="container-00" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.758867 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c00d30-e88b-474a-b024-2059d1caca5c" containerName="container-00" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.759718 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.862965 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/45607c50-2517-47ed-ba5e-521fc47216db-host\") pod \"crc-debug-smv5m\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.863275 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwg4d\" (UniqueName: \"kubernetes.io/projected/45607c50-2517-47ed-ba5e-521fc47216db-kube-api-access-wwg4d\") pod \"crc-debug-smv5m\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.965474 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/45607c50-2517-47ed-ba5e-521fc47216db-host\") pod \"crc-debug-smv5m\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.965548 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwg4d\" (UniqueName: \"kubernetes.io/projected/45607c50-2517-47ed-ba5e-521fc47216db-kube-api-access-wwg4d\") pod \"crc-debug-smv5m\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.965651 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/45607c50-2517-47ed-ba5e-521fc47216db-host\") pod \"crc-debug-smv5m\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:21 crc kubenswrapper[4995]: I0120 18:42:21.990543 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwg4d\" (UniqueName: \"kubernetes.io/projected/45607c50-2517-47ed-ba5e-521fc47216db-kube-api-access-wwg4d\") pod \"crc-debug-smv5m\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.004746 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:42:22 crc kubenswrapper[4995]: E0120 18:42:22.005213 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.019244 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91c00d30-e88b-474a-b024-2059d1caca5c" path="/var/lib/kubelet/pods/91c00d30-e88b-474a-b024-2059d1caca5c/volumes" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.076273 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:22 crc kubenswrapper[4995]: W0120 18:42:22.123658 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45607c50_2517_47ed_ba5e_521fc47216db.slice/crio-b63e7f90f1a209b91dc4023d61d771f83aad5ccfb60391ca17646c9e1d0f58a1 WatchSource:0}: Error finding container b63e7f90f1a209b91dc4023d61d771f83aad5ccfb60391ca17646c9e1d0f58a1: Status 404 returned error can't find the container with id b63e7f90f1a209b91dc4023d61d771f83aad5ccfb60391ca17646c9e1d0f58a1 Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.746438 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-7zdch_c0a3e997-8709-444b-ae4e-8fc34b04cb6e/manager/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.749043 4995 generic.go:334] "Generic (PLEG): container finished" podID="45607c50-2517-47ed-ba5e-521fc47216db" containerID="3fdbfe6786d95a23e141e8d26048406f15d68779bf0be20bcec119dcdb48749c" exitCode=0 Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.749151 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-smv5m" event={"ID":"45607c50-2517-47ed-ba5e-521fc47216db","Type":"ContainerDied","Data":"3fdbfe6786d95a23e141e8d26048406f15d68779bf0be20bcec119dcdb48749c"} Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.749190 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-mblc8/crc-debug-smv5m" event={"ID":"45607c50-2517-47ed-ba5e-521fc47216db","Type":"ContainerStarted","Data":"b63e7f90f1a209b91dc4023d61d771f83aad5ccfb60391ca17646c9e1d0f58a1"} Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.795967 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-zgvcz_3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a/manager/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.802833 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-mblc8/crc-debug-smv5m"] Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.806855 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mj76w_9f302bf3-1501-44cc-924c-2e5c42c0eb58/manager/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.813456 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-mblc8/crc-debug-smv5m"] Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.820518 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/extract/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.826199 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/util/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.834629 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/pull/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.923116 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-bxm9j_072647c8-2d0e-4716-bb29-a87e3ff5cd29/manager/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.932820 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-wm2kb_49392c07-237b-447e-a126-f06e1cbf32a2/manager/0.log" Jan 20 18:42:22 crc kubenswrapper[4995]: I0120 18:42:22.967561 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-zs4nf_c8061771-759d-49d5-b88b-9d66f45277ac/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.247097 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-zd75z_439ab902-28ff-48a4-81e4-93c72937e573/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.258827 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-7p5v4_f4577775-2c19-495a-95e7-1638f359b533/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.333755 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-dwn52_a3c2211e-845d-47cc-b4a5-962340b0d53c/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.345793 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-wjrpf_93ac6eeb-0456-4cfe-8298-b8b97d09716c/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.389206 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-qd2nk_47ec26a3-41ca-482f-b539-c9dc32af0bb0/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.434783 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-m7p7b_86d4f806-c5e4-4ce0-a859-5e104b0d5dce/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.524939 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-95tbl_ffe39c73-665e-4de6-afb5-2e9b93419e33/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.538551 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-fk7x2_de6fc9c2-f9a9-41fd-8cfb-b0493d823c20/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.555736 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9_50e51652-8f18-4234-b29b-85e684e63bfd/manager/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.653876 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c987874f9-t2thd_6bd0aa66-ff4d-43ff-925d-e3ead5943058/operator/0.log" Jan 20 18:42:23 crc kubenswrapper[4995]: I0120 18:42:23.902704 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.007948 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwg4d\" (UniqueName: \"kubernetes.io/projected/45607c50-2517-47ed-ba5e-521fc47216db-kube-api-access-wwg4d\") pod \"45607c50-2517-47ed-ba5e-521fc47216db\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.007979 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/45607c50-2517-47ed-ba5e-521fc47216db-host\") pod \"45607c50-2517-47ed-ba5e-521fc47216db\" (UID: \"45607c50-2517-47ed-ba5e-521fc47216db\") " Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.008150 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45607c50-2517-47ed-ba5e-521fc47216db-host" (OuterVolumeSpecName: "host") pod "45607c50-2517-47ed-ba5e-521fc47216db" (UID: "45607c50-2517-47ed-ba5e-521fc47216db"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.009248 4995 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/45607c50-2517-47ed-ba5e-521fc47216db-host\") on node \"crc\" DevicePath \"\"" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.014414 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45607c50-2517-47ed-ba5e-521fc47216db-kube-api-access-wwg4d" (OuterVolumeSpecName: "kube-api-access-wwg4d") pod "45607c50-2517-47ed-ba5e-521fc47216db" (UID: "45607c50-2517-47ed-ba5e-521fc47216db"). InnerVolumeSpecName "kube-api-access-wwg4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.111628 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwg4d\" (UniqueName: \"kubernetes.io/projected/45607c50-2517-47ed-ba5e-521fc47216db-kube-api-access-wwg4d\") on node \"crc\" DevicePath \"\"" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.784565 4995 scope.go:117] "RemoveContainer" containerID="3fdbfe6786d95a23e141e8d26048406f15d68779bf0be20bcec119dcdb48749c" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.784790 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-mblc8/crc-debug-smv5m" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.929491 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7696897b84-8gt6d_bb15a8a1-9d6b-4032-9ecb-71719f2b3d91/manager/0.log" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.938830 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r87vf_3e43abde-a2a7-4334-a3a2-7859aad1a87b/registry-server/0.log" Jan 20 18:42:24 crc kubenswrapper[4995]: I0120 18:42:24.993448 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-vvwk8_9d2f128c-9463-4735-9bf7-91bff7148887/manager/0.log" Jan 20 18:42:25 crc kubenswrapper[4995]: I0120 18:42:25.014513 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-6rn8v_37f347f2-1ab4-4e49-9340-57a960ff8eb1/manager/0.log" Jan 20 18:42:25 crc kubenswrapper[4995]: I0120 18:42:25.032638 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-jwzhh_31bd181f-39ff-4e9f-949c-8a6ed84f3f42/operator/0.log" Jan 20 18:42:25 crc kubenswrapper[4995]: I0120 18:42:25.058980 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-dxnvv_d0afd012-c6e1-4a66-a8a1-9edccfdff278/manager/0.log" Jan 20 18:42:25 crc kubenswrapper[4995]: I0120 18:42:25.249528 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-fdpgr_c2d307fa-2be9-4f04-8ae4-f3b55e987ceb/manager/0.log" Jan 20 18:42:25 crc kubenswrapper[4995]: I0120 18:42:25.261500 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-csd5m_8a04fc71-9575-4cf5-bdab-2c741002c47f/manager/0.log" Jan 20 18:42:25 crc kubenswrapper[4995]: I0120 18:42:25.325751 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6db9b5db6c-29hz8_17dfb7c9-6832-48d3-ad83-91508cf85de3/manager/0.log" Jan 20 18:42:26 crc kubenswrapper[4995]: I0120 18:42:26.002650 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45607c50-2517-47ed-ba5e-521fc47216db" path="/var/lib/kubelet/pods/45607c50-2517-47ed-ba5e-521fc47216db/volumes" Jan 20 18:42:30 crc kubenswrapper[4995]: I0120 18:42:30.285511 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-xxvtq_5573e17e-4b7e-4afd-8608-e8afd1c98256/control-plane-machine-set-operator/0.log" Jan 20 18:42:30 crc kubenswrapper[4995]: I0120 18:42:30.300323 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/kube-rbac-proxy/0.log" Jan 20 18:42:30 crc kubenswrapper[4995]: I0120 18:42:30.314628 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/machine-api-operator/0.log" Jan 20 18:42:35 crc kubenswrapper[4995]: I0120 18:42:35.990422 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:42:35 crc 
kubenswrapper[4995]: E0120 18:42:35.991558 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:42:36 crc kubenswrapper[4995]: I0120 18:42:36.500769 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-s4m7v_c4363779-0c13-4195-9d79-aa4271bfc02f/cert-manager-controller/0.log" Jan 20 18:42:36 crc kubenswrapper[4995]: I0120 18:42:36.519196 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-s9d5f_4b2e374d-19bf-42a0-8f00-7dea7ac84bea/cert-manager-cainjector/0.log" Jan 20 18:42:36 crc kubenswrapper[4995]: I0120 18:42:36.531516 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zngmd_ac9c3170-cb6c-4320-ad74-57b76462b730/cert-manager-webhook/0.log" Jan 20 18:42:42 crc kubenswrapper[4995]: I0120 18:42:42.197981 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-wlqrf_e9e16a4a-ae36-4787-936d-78f9f621b82b/nmstate-console-plugin/0.log" Jan 20 18:42:42 crc kubenswrapper[4995]: I0120 18:42:42.222353 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-lzkmg_04b8b048-2dd6-4899-8012-e20e4783fe36/nmstate-handler/0.log" Jan 20 18:42:42 crc kubenswrapper[4995]: I0120 18:42:42.233528 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/nmstate-metrics/0.log" Jan 20 18:42:42 crc kubenswrapper[4995]: I0120 18:42:42.249385 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/kube-rbac-proxy/0.log" Jan 20 18:42:42 crc kubenswrapper[4995]: I0120 18:42:42.261178 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-l5g5b_eb9959cc-1ba1-48c5-9a2b-846fb2ae6590/nmstate-operator/0.log" Jan 20 18:42:42 crc kubenswrapper[4995]: I0120 18:42:42.276421 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-95rj4_5afe74bc-6c6a-4c69-8991-aea61b381a53/nmstate-webhook/0.log" Jan 20 18:42:47 crc kubenswrapper[4995]: I0120 18:42:47.994855 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:42:47 crc kubenswrapper[4995]: E0120 18:42:47.997411 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:42:49 crc kubenswrapper[4995]: I0120 18:42:49.032207 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-q94m9_5def50d1-b2d8-447a-8f22-8632fd26d689/prometheus-operator/0.log" Jan 20 18:42:49 crc kubenswrapper[4995]: I0120 18:42:49.047266 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl_27395fe5-dac8-4556-8446-a478ea8f7928/prometheus-operator-admission-webhook/0.log" Jan 20 18:42:49 crc kubenswrapper[4995]: I0120 18:42:49.059404 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77_15bbd7b9-457e-4456-ba6a-5f664a592bab/prometheus-operator-admission-webhook/0.log" Jan 20 18:42:49 crc kubenswrapper[4995]: I0120 18:42:49.091033 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-h2hm4_decae503-5765-4258-9081-981c2215ebcf/operator/0.log" Jan 20 18:42:49 crc kubenswrapper[4995]: I0120 18:42:49.101641 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-thhmm_502777ef-bdd5-4d42-b695-a7259cd811c9/perses-operator/0.log" Jan 20 18:42:55 crc kubenswrapper[4995]: I0120 18:42:55.437795 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/controller/0.log" Jan 20 18:42:55 crc kubenswrapper[4995]: I0120 18:42:55.445418 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/kube-rbac-proxy/0.log" Jan 20 18:42:55 crc kubenswrapper[4995]: I0120 18:42:55.471631 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/controller/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.276544 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.289515 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/reloader/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.296324 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr-metrics/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.308700 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.317842 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy-frr/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.326060 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-frr-files/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.333651 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-reloader/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.342698 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-metrics/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.352681 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-zwjrk_473c4019-d6be-4420-a678-d18999ddbe1c/frr-k8s-webhook-server/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.384807 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6dd7779458-w2rt4_8b17b582-a06b-4ece-b513-7f826c838f6f/manager/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.396477 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5c6d4b5599-f8tsv_7fae7627-5782-4525-ba17-4507d15764cd/webhook-server/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.784576 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/speaker/0.log" Jan 20 18:42:57 crc kubenswrapper[4995]: I0120 18:42:57.792924 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/kube-rbac-proxy/0.log" Jan 20 18:42:58 crc kubenswrapper[4995]: I0120 18:42:58.989660 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:42:58 crc kubenswrapper[4995]: E0120 18:42:58.990002 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.803338 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b_d10c311c-330e-4ef3-bfb4-bbb14ca8d42d/extract/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.812598 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b_d10c311c-330e-4ef3-bfb4-bbb14ca8d42d/util/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.818892 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcgwq6b_d10c311c-330e-4ef3-bfb4-bbb14ca8d42d/pull/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.826495 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp_66d983b8-16a0-44ba-8e76-c1a6645c2001/extract/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.831657 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp_66d983b8-16a0-44ba-8e76-c1a6645c2001/util/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.837367 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71397xlp_66d983b8-16a0-44ba-8e76-c1a6645c2001/pull/0.log" Jan 20 18:43:01 crc 
kubenswrapper[4995]: I0120 18:43:01.848391 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc_bb24643a-1c98-49d5-a82c-53b3f9fb88f6/extract/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.855093 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc_bb24643a-1c98-49d5-a82c-53b3f9fb88f6/util/0.log" Jan 20 18:43:01 crc kubenswrapper[4995]: I0120 18:43:01.862246 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08w8nnc_bb24643a-1c98-49d5-a82c-53b3f9fb88f6/pull/0.log" Jan 20 18:43:02 crc kubenswrapper[4995]: I0120 18:43:02.257613 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4fcqp_075a85ee-fd7b-44e9-a631-840b4fcc96fb/registry-server/0.log" Jan 20 18:43:02 crc kubenswrapper[4995]: I0120 18:43:02.263958 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4fcqp_075a85ee-fd7b-44e9-a631-840b4fcc96fb/extract-utilities/0.log" Jan 20 18:43:02 crc kubenswrapper[4995]: I0120 18:43:02.271487 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-4fcqp_075a85ee-fd7b-44e9-a631-840b4fcc96fb/extract-content/0.log" Jan 20 18:43:03 crc kubenswrapper[4995]: I0120 18:43:03.472560 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x4fxr_04b5f989-6750-4e5c-8ded-4af0bf07325b/registry-server/0.log" Jan 20 18:43:03 crc kubenswrapper[4995]: I0120 18:43:03.480439 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x4fxr_04b5f989-6750-4e5c-8ded-4af0bf07325b/extract-utilities/0.log" Jan 20 18:43:03 crc kubenswrapper[4995]: I0120 18:43:03.487701 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x4fxr_04b5f989-6750-4e5c-8ded-4af0bf07325b/extract-content/0.log" Jan 20 18:43:03 crc kubenswrapper[4995]: I0120 18:43:03.503255 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-l2zqv_9c2404e7-457d-4f79-814d-f6a44e88c749/marketplace-operator/0.log" Jan 20 18:43:03 crc kubenswrapper[4995]: I0120 18:43:03.815574 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-62wzq_9539d617-3abb-4dd5-aa3a-f9f6dd8615bb/registry-server/0.log" Jan 20 18:43:04 crc kubenswrapper[4995]: I0120 18:43:04.316922 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-62wzq_9539d617-3abb-4dd5-aa3a-f9f6dd8615bb/extract-utilities/0.log" Jan 20 18:43:04 crc kubenswrapper[4995]: I0120 18:43:04.341337 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-62wzq_9539d617-3abb-4dd5-aa3a-f9f6dd8615bb/extract-content/0.log" Jan 20 18:43:05 crc kubenswrapper[4995]: I0120 18:43:05.011956 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nzn96_144eba19-b4f0-48d9-a1f6-fc191b87c617/registry-server/0.log" Jan 20 18:43:05 crc kubenswrapper[4995]: I0120 18:43:05.017239 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-nzn96_144eba19-b4f0-48d9-a1f6-fc191b87c617/extract-utilities/0.log" Jan 20 18:43:05 crc kubenswrapper[4995]: I0120 18:43:05.024355 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nzn96_144eba19-b4f0-48d9-a1f6-fc191b87c617/extract-content/0.log" Jan 20 18:43:08 crc kubenswrapper[4995]: I0120 18:43:08.892042 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-q94m9_5def50d1-b2d8-447a-8f22-8632fd26d689/prometheus-operator/0.log" Jan 20 18:43:08 crc kubenswrapper[4995]: I0120 18:43:08.912751 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl_27395fe5-dac8-4556-8446-a478ea8f7928/prometheus-operator-admission-webhook/0.log" Jan 20 18:43:08 crc kubenswrapper[4995]: I0120 18:43:08.924587 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77_15bbd7b9-457e-4456-ba6a-5f664a592bab/prometheus-operator-admission-webhook/0.log" Jan 20 18:43:08 crc kubenswrapper[4995]: I0120 18:43:08.964523 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-h2hm4_decae503-5765-4258-9081-981c2215ebcf/operator/0.log" Jan 20 18:43:08 crc kubenswrapper[4995]: I0120 18:43:08.979060 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-thhmm_502777ef-bdd5-4d42-b695-a7259cd811c9/perses-operator/0.log" Jan 20 18:43:13 crc kubenswrapper[4995]: I0120 18:43:13.990857 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:43:13 crc kubenswrapper[4995]: E0120 18:43:13.991417 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:43:27 crc kubenswrapper[4995]: I0120 18:43:27.990229 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:43:27 crc kubenswrapper[4995]: E0120 18:43:27.990899 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:43:40 crc kubenswrapper[4995]: I0120 18:43:40.990768 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:43:40 crc kubenswrapper[4995]: E0120 18:43:40.991357 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:43:53 crc kubenswrapper[4995]: I0120 18:43:53.989853 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:43:53 crc kubenswrapper[4995]: E0120 18:43:53.990716 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:44:04 crc kubenswrapper[4995]: I0120 18:44:04.990210 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:44:04 crc kubenswrapper[4995]: E0120 18:44:04.991044 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:44:16 crc kubenswrapper[4995]: I0120 18:44:16.989326 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:44:16 crc kubenswrapper[4995]: E0120 18:44:16.990051 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:44:31 crc kubenswrapper[4995]: I0120 18:44:31.995701 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:44:31 crc kubenswrapper[4995]: E0120 18:44:31.996679 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.579963 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-q94m9_5def50d1-b2d8-447a-8f22-8632fd26d689/prometheus-operator/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.602021 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-csqbl_27395fe5-dac8-4556-8446-a478ea8f7928/prometheus-operator-admission-webhook/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.614821 4995 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-597c8bbbdd-jrq77_15bbd7b9-457e-4456-ba6a-5f664a592bab/prometheus-operator-admission-webhook/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.649755 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-h2hm4_decae503-5765-4258-9081-981c2215ebcf/operator/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.659882 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-thhmm_502777ef-bdd5-4d42-b695-a7259cd811c9/perses-operator/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.763564 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-s4m7v_c4363779-0c13-4195-9d79-aa4271bfc02f/cert-manager-controller/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.783324 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-s9d5f_4b2e374d-19bf-42a0-8f00-7dea7ac84bea/cert-manager-cainjector/0.log" Jan 20 18:44:34 crc kubenswrapper[4995]: I0120 18:44:34.800966 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zngmd_ac9c3170-cb6c-4320-ad74-57b76462b730/cert-manager-webhook/0.log" Jan 20 18:44:35 crc kubenswrapper[4995]: I0120 18:44:35.934034 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/controller/0.log" Jan 20 18:44:35 crc kubenswrapper[4995]: I0120 18:44:35.942636 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-4qn5w_eb77594b-535f-4b63-967f-05cd3314ceb9/kube-rbac-proxy/0.log" Jan 20 18:44:35 crc kubenswrapper[4995]: I0120 18:44:35.963333 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/controller/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.168093 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-7zdch_c0a3e997-8709-444b-ae4e-8fc34b04cb6e/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.222210 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-zgvcz_3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.239699 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mj76w_9f302bf3-1501-44cc-924c-2e5c42c0eb58/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.253862 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/extract/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.260719 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/util/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.270459 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/pull/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.407814 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-bxm9j_072647c8-2d0e-4716-bb29-a87e3ff5cd29/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.423940 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-wm2kb_49392c07-237b-447e-a126-f06e1cbf32a2/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.452069 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-zs4nf_c8061771-759d-49d5-b88b-9d66f45277ac/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.936128 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-zd75z_439ab902-28ff-48a4-81e4-93c72937e573/manager/0.log" Jan 20 18:44:36 crc kubenswrapper[4995]: I0120 18:44:36.954899 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-7p5v4_f4577775-2c19-495a-95e7-1638f359b533/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.064160 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-dwn52_a3c2211e-845d-47cc-b4a5-962340b0d53c/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.076405 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-wjrpf_93ac6eeb-0456-4cfe-8298-b8b97d09716c/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.128238 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-qd2nk_47ec26a3-41ca-482f-b539-c9dc32af0bb0/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.201390 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-m7p7b_86d4f806-c5e4-4ce0-a859-5e104b0d5dce/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.334853 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-95tbl_ffe39c73-665e-4de6-afb5-2e9b93419e33/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.385154 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-fk7x2_de6fc9c2-f9a9-41fd-8cfb-b0493d823c20/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.401199 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9_50e51652-8f18-4234-b29b-85e684e63bfd/manager/0.log" Jan 20 18:44:37 crc kubenswrapper[4995]: I0120 18:44:37.536587 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c987874f9-t2thd_6bd0aa66-ff4d-43ff-925d-e3ead5943058/operator/0.log" Jan 20 18:44:38 crc kubenswrapper[4995]: I0120 18:44:38.989029 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.003503 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/reloader/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.008287 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/frr-metrics/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.019513 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.026604 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/kube-rbac-proxy-frr/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.030590 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-frr-files/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.039673 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-reloader/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.047091 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-wbmjs_5606415b-e263-4896-90b7-62fab9ff9d6a/cp-metrics/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.088831 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g8f9c"] Jan 20 18:44:39 crc kubenswrapper[4995]: E0120 18:44:39.090377 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45607c50-2517-47ed-ba5e-521fc47216db" containerName="container-00" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.090661 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="45607c50-2517-47ed-ba5e-521fc47216db" containerName="container-00" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.091494 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="45607c50-2517-47ed-ba5e-521fc47216db" containerName="container-00" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.093872 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-zwjrk_473c4019-d6be-4420-a678-d18999ddbe1c/frr-k8s-webhook-server/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.098957 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.121767 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-utilities\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.122009 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-catalog-content\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.128309 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llrvn\" (UniqueName: \"kubernetes.io/projected/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-kube-api-access-llrvn\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.131245 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g8f9c"] Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.147015 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6dd7779458-w2rt4_8b17b582-a06b-4ece-b513-7f826c838f6f/manager/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.176417 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5c6d4b5599-f8tsv_7fae7627-5782-4525-ba17-4507d15764cd/webhook-server/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.231513 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-utilities\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.231554 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-catalog-content\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.231615 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llrvn\" (UniqueName: \"kubernetes.io/projected/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-kube-api-access-llrvn\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.232068 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-utilities\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " 
pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.232255 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-catalog-content\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.259047 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llrvn\" (UniqueName: \"kubernetes.io/projected/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-kube-api-access-llrvn\") pod \"certified-operators-g8f9c\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.457265 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.534954 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7696897b84-8gt6d_bb15a8a1-9d6b-4032-9ecb-71719f2b3d91/manager/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.583172 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r87vf_3e43abde-a2a7-4334-a3a2-7859aad1a87b/registry-server/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.720923 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-vvwk8_9d2f128c-9463-4735-9bf7-91bff7148887/manager/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.822569 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-6rn8v_37f347f2-1ab4-4e49-9340-57a960ff8eb1/manager/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.845846 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-jwzhh_31bd181f-39ff-4e9f-949c-8a6ed84f3f42/operator/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.857477 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/speaker/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.869615 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cgq47_08829f43-9e73-4c1a-b4dc-16d2f1e01a3b/kube-rbac-proxy/0.log" Jan 20 18:44:39 crc kubenswrapper[4995]: I0120 18:44:39.891537 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-dxnvv_d0afd012-c6e1-4a66-a8a1-9edccfdff278/manager/0.log" Jan 20 18:44:40 crc kubenswrapper[4995]: I0120 18:44:40.047241 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-fdpgr_c2d307fa-2be9-4f04-8ae4-f3b55e987ceb/manager/0.log" Jan 20 18:44:40 crc kubenswrapper[4995]: I0120 18:44:40.054426 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g8f9c"] Jan 20 18:44:40 crc kubenswrapper[4995]: I0120 18:44:40.058440 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-csd5m_8a04fc71-9575-4cf5-bdab-2c741002c47f/manager/0.log" Jan 20 18:44:40 crc kubenswrapper[4995]: I0120 18:44:40.116744 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6db9b5db6c-29hz8_17dfb7c9-6832-48d3-ad83-91508cf85de3/manager/0.log" Jan 20 18:44:40 crc kubenswrapper[4995]: I0120 18:44:40.223099 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerStarted","Data":"e2b9b24e10fbb0da48b950cf2adc8e9e29b6c5b7e9755535a9f41d18b8c9921e"} Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.276482 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n6zb7"] Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.278699 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.309137 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n6zb7"] Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.334716 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-catalog-content\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.334828 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p59vj\" (UniqueName: \"kubernetes.io/projected/9a681626-4bff-4503-9380-b3fdb32b264a-kube-api-access-p59vj\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.334879 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-utilities\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.436477 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p59vj\" (UniqueName: \"kubernetes.io/projected/9a681626-4bff-4503-9380-b3fdb32b264a-kube-api-access-p59vj\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.436554 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-utilities\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.436711 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-catalog-content\") pod 
\"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.437198 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-catalog-content\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.437791 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-utilities\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.460859 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p59vj\" (UniqueName: \"kubernetes.io/projected/9a681626-4bff-4503-9380-b3fdb32b264a-kube-api-access-p59vj\") pod \"redhat-marketplace-n6zb7\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") " pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.617181 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.819988 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-wlqrf_e9e16a4a-ae36-4787-936d-78f9f621b82b/nmstate-console-plugin/0.log" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.863348 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-lzkmg_04b8b048-2dd6-4899-8012-e20e4783fe36/nmstate-handler/0.log" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.888020 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/nmstate-metrics/0.log" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.901126 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-wb9tm_95224d03-a236-4419-9ea7-35b72ad16367/kube-rbac-proxy/0.log" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.940654 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-l5g5b_eb9959cc-1ba1-48c5-9a2b-846fb2ae6590/nmstate-operator/0.log" Jan 20 18:44:42 crc kubenswrapper[4995]: I0120 18:44:42.963936 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-95rj4_5afe74bc-6c6a-4c69-8991-aea61b381a53/nmstate-webhook/0.log" Jan 20 18:44:43 crc kubenswrapper[4995]: W0120 18:44:43.223137 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a681626_4bff_4503_9380_b3fdb32b264a.slice/crio-2f65d09f51f294539b6c49e690f7c9813bd0d5634207bb9d795fa07c50b6841f WatchSource:0}: Error finding container 2f65d09f51f294539b6c49e690f7c9813bd0d5634207bb9d795fa07c50b6841f: Status 404 returned error can't find the container with id 2f65d09f51f294539b6c49e690f7c9813bd0d5634207bb9d795fa07c50b6841f Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.227456 4995 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n6zb7"] Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.238988 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-s4m7v_c4363779-0c13-4195-9d79-aa4271bfc02f/cert-manager-controller/0.log" Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.248812 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-s9d5f_4b2e374d-19bf-42a0-8f00-7dea7ac84bea/cert-manager-cainjector/0.log" Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.258388 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-zngmd_ac9c3170-cb6c-4320-ad74-57b76462b730/cert-manager-webhook/0.log" Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.262524 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n6zb7" event={"ID":"9a681626-4bff-4503-9380-b3fdb32b264a","Type":"ContainerStarted","Data":"2f65d09f51f294539b6c49e690f7c9813bd0d5634207bb9d795fa07c50b6841f"} Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.267323 4995 generic.go:334] "Generic (PLEG): container finished" podID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerID="70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee" exitCode=0 Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.267373 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerDied","Data":"70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee"} Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.271331 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 18:44:43 crc kubenswrapper[4995]: I0120 18:44:43.989278 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:44:43 crc kubenswrapper[4995]: E0120 18:44:43.990206 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:44:44 crc kubenswrapper[4995]: I0120 18:44:44.072265 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-xxvtq_5573e17e-4b7e-4afd-8608-e8afd1c98256/control-plane-machine-set-operator/0.log" Jan 20 18:44:44 crc kubenswrapper[4995]: I0120 18:44:44.088509 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/kube-rbac-proxy/0.log" Jan 20 18:44:44 crc kubenswrapper[4995]: I0120 18:44:44.097650 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-l4kss_2585d1e6-a851-4ecc-8acd-8fd3d2426576/machine-api-operator/0.log" Jan 20 18:44:44 crc kubenswrapper[4995]: I0120 18:44:44.276947 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" 
event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerStarted","Data":"f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491"} Jan 20 18:44:44 crc kubenswrapper[4995]: I0120 18:44:44.278558 4995 generic.go:334] "Generic (PLEG): container finished" podID="9a681626-4bff-4503-9380-b3fdb32b264a" containerID="48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019" exitCode=0 Jan 20 18:44:44 crc kubenswrapper[4995]: I0120 18:44:44.278601 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n6zb7" event={"ID":"9a681626-4bff-4503-9380-b3fdb32b264a","Type":"ContainerDied","Data":"48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019"} Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.277483 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-7zdch_c0a3e997-8709-444b-ae4e-8fc34b04cb6e/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.292611 4995 generic.go:334] "Generic (PLEG): container finished" podID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerID="f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491" exitCode=0 Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.292661 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerDied","Data":"f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491"} Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.348558 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-zgvcz_3d7183f8-a0bc-4010-b80c-d2d2a1eedf1a/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.368769 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-mj76w_9f302bf3-1501-44cc-924c-2e5c42c0eb58/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.381740 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/extract/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.388373 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/util/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.398510 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ebcacf4ad482919873da5ba9dc07d1f41cb6c4b04c1105584a072e8dfa86bvr_da01a294-6f73-4389-8117-a857e195a1c8/pull/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.518925 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-bxm9j_072647c8-2d0e-4716-bb29-a87e3ff5cd29/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.529326 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-wm2kb_49392c07-237b-447e-a126-f06e1cbf32a2/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.553594 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-zs4nf_c8061771-759d-49d5-b88b-9d66f45277ac/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.859743 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-zd75z_439ab902-28ff-48a4-81e4-93c72937e573/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.879824 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-7p5v4_f4577775-2c19-495a-95e7-1638f359b533/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.967366 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-dwn52_a3c2211e-845d-47cc-b4a5-962340b0d53c/manager/0.log" Jan 20 18:44:45 crc kubenswrapper[4995]: I0120 18:44:45.986968 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-wjrpf_93ac6eeb-0456-4cfe-8298-b8b97d09716c/manager/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.023748 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-qd2nk_47ec26a3-41ca-482f-b539-c9dc32af0bb0/manager/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.073632 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-m7p7b_86d4f806-c5e4-4ce0-a859-5e104b0d5dce/manager/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.171816 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-95tbl_ffe39c73-665e-4de6-afb5-2e9b93419e33/manager/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.186704 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-fk7x2_de6fc9c2-f9a9-41fd-8cfb-b0493d823c20/manager/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.205776 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854r9sf9_50e51652-8f18-4234-b29b-85e684e63bfd/manager/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.309761 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerStarted","Data":"a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1"} Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.312619 4995 generic.go:334] "Generic (PLEG): container finished" podID="9a681626-4bff-4503-9380-b3fdb32b264a" containerID="7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198" exitCode=0 Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.312670 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n6zb7" event={"ID":"9a681626-4bff-4503-9380-b3fdb32b264a","Type":"ContainerDied","Data":"7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198"} Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.332678 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5c987874f9-t2thd_6bd0aa66-ff4d-43ff-925d-e3ead5943058/operator/0.log" Jan 20 18:44:46 crc kubenswrapper[4995]: I0120 18:44:46.333873 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g8f9c" podStartSLOduration=4.79242311 podStartE2EDuration="7.333851489s" podCreationTimestamp="2026-01-20 18:44:39 +0000 UTC" firstStartedPulling="2026-01-20 18:44:43.271089292 +0000 UTC m=+8001.515694098" lastFinishedPulling="2026-01-20 18:44:45.812517671 +0000 UTC m=+8004.057122477" observedRunningTime="2026-01-20 18:44:46.326278675 +0000 UTC m=+8004.570883501" watchObservedRunningTime="2026-01-20 18:44:46.333851489 +0000 UTC m=+8004.578456285" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.357181 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n6zb7" event={"ID":"9a681626-4bff-4503-9380-b3fdb32b264a","Type":"ContainerStarted","Data":"b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e"} Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.389766 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n6zb7" podStartSLOduration=2.7940000830000002 podStartE2EDuration="5.389750252s" podCreationTimestamp="2026-01-20 18:44:42 +0000 UTC" firstStartedPulling="2026-01-20 18:44:44.280346711 +0000 UTC m=+8002.524951507" lastFinishedPulling="2026-01-20 18:44:46.87609687 +0000 UTC m=+8005.120701676" observedRunningTime="2026-01-20 18:44:47.381299855 +0000 UTC m=+8005.625904671" watchObservedRunningTime="2026-01-20 18:44:47.389750252 +0000 UTC m=+8005.634355058" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.706019 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7696897b84-8gt6d_bb15a8a1-9d6b-4032-9ecb-71719f2b3d91/manager/0.log" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.717989 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r87vf_3e43abde-a2a7-4334-a3a2-7859aad1a87b/registry-server/0.log" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.772000 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-vvwk8_9d2f128c-9463-4735-9bf7-91bff7148887/manager/0.log" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.797186 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-6rn8v_37f347f2-1ab4-4e49-9340-57a960ff8eb1/manager/0.log" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.822457 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-jwzhh_31bd181f-39ff-4e9f-949c-8a6ed84f3f42/operator/0.log" Jan 20 18:44:47 crc kubenswrapper[4995]: I0120 18:44:47.848146 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-85dd56d4cc-dxnvv_d0afd012-c6e1-4a66-a8a1-9edccfdff278/manager/0.log" Jan 20 18:44:48 crc kubenswrapper[4995]: I0120 18:44:48.018622 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-fdpgr_c2d307fa-2be9-4f04-8ae4-f3b55e987ceb/manager/0.log" Jan 20 18:44:48 crc kubenswrapper[4995]: I0120 18:44:48.033002 
4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-csd5m_8a04fc71-9575-4cf5-bdab-2c741002c47f/manager/0.log" Jan 20 18:44:48 crc kubenswrapper[4995]: I0120 18:44:48.104660 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6db9b5db6c-29hz8_17dfb7c9-6832-48d3-ad83-91508cf85de3/manager/0.log" Jan 20 18:44:49 crc kubenswrapper[4995]: I0120 18:44:49.457907 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:49 crc kubenswrapper[4995]: I0120 18:44:49.458994 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:49 crc kubenswrapper[4995]: I0120 18:44:49.523977 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.105879 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/kube-multus-additional-cni-plugins/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.113748 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/egress-router-binary-copy/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.120840 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/cni-plugins/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.127071 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/bond-cni-plugin/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.135789 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/routeoverride-cni/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.143503 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/whereabouts-cni-bincopy/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.150036 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-vj5zz_a0e5bec5-c9a4-46b0-87c1-5eea75de723e/whereabouts-cni/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.177167 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-vw77m_7b1451e1-776c-411e-9790-8091d11c01fd/multus-admission-controller/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.181952 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-vw77m_7b1451e1-776c-411e-9790-8091d11c01fd/kube-rbac-proxy/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.223321 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/2.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.332988 4995 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-vlvwg_5008a882-4540-4ebe-8a27-53f0de0cbd4a/kube-multus/3.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.378189 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-kbdtf_9dfc8bb5-28e8-4ba3-8009-09d5585a1a12/network-metrics-daemon/0.log" Jan 20 18:44:50 crc kubenswrapper[4995]: I0120 18:44:50.383047 4995 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-kbdtf_9dfc8bb5-28e8-4ba3-8009-09d5585a1a12/kube-rbac-proxy/0.log" Jan 20 18:44:51 crc kubenswrapper[4995]: I0120 18:44:51.446613 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:51 crc kubenswrapper[4995]: I0120 18:44:51.863727 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g8f9c"] Jan 20 18:44:52 crc kubenswrapper[4995]: I0120 18:44:52.618181 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:52 crc kubenswrapper[4995]: I0120 18:44:52.618234 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:52 crc kubenswrapper[4995]: I0120 18:44:52.698015 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.427224 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g8f9c" podUID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerName="registry-server" containerID="cri-o://a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1" gracePeriod=2 Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.487367 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n6zb7" Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.954760 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.959426 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llrvn\" (UniqueName: \"kubernetes.io/projected/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-kube-api-access-llrvn\") pod \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.959488 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-catalog-content\") pod \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.959594 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-utilities\") pod \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\" (UID: \"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3\") " Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.960681 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-utilities" (OuterVolumeSpecName: "utilities") pod "a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" (UID: "a2c471c6-839b-44c8-8ba4-06b5ac3e25c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:44:53 crc kubenswrapper[4995]: I0120 18:44:53.971521 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-kube-api-access-llrvn" (OuterVolumeSpecName: "kube-api-access-llrvn") pod "a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" (UID: "a2c471c6-839b-44c8-8ba4-06b5ac3e25c3"). InnerVolumeSpecName "kube-api-access-llrvn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.029012 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" (UID: "a2c471c6-839b-44c8-8ba4-06b5ac3e25c3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.061388 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.061418 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llrvn\" (UniqueName: \"kubernetes.io/projected/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-kube-api-access-llrvn\") on node \"crc\" DevicePath \"\"" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.061428 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.439223 4995 generic.go:334] "Generic (PLEG): container finished" podID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerID="a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1" exitCode=0 Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.439485 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g8f9c" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.439533 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerDied","Data":"a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1"} Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.439617 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g8f9c" event={"ID":"a2c471c6-839b-44c8-8ba4-06b5ac3e25c3","Type":"ContainerDied","Data":"e2b9b24e10fbb0da48b950cf2adc8e9e29b6c5b7e9755535a9f41d18b8c9921e"} Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.439645 4995 scope.go:117] "RemoveContainer" containerID="a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.479051 4995 scope.go:117] "RemoveContainer" containerID="f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.486849 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n6zb7"] Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.508180 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g8f9c"] Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.512728 4995 scope.go:117] "RemoveContainer" containerID="70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.514747 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g8f9c"] Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.559904 4995 scope.go:117] "RemoveContainer" containerID="a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1" Jan 20 18:44:54 crc kubenswrapper[4995]: E0120 18:44:54.561427 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1\": container with ID starting with a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1 
not found: ID does not exist" containerID="a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.561474 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1"} err="failed to get container status \"a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1\": rpc error: code = NotFound desc = could not find container \"a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1\": container with ID starting with a8a72e135fd277e7ae0755571f7cf286c98f50accef47458f2dd541b2985d9e1 not found: ID does not exist" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.561507 4995 scope.go:117] "RemoveContainer" containerID="f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491" Jan 20 18:44:54 crc kubenswrapper[4995]: E0120 18:44:54.562652 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491\": container with ID starting with f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491 not found: ID does not exist" containerID="f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.562695 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491"} err="failed to get container status \"f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491\": rpc error: code = NotFound desc = could not find container \"f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491\": container with ID starting with f6380be993af85ac084cf3a3588c1934e9bd6da75c0e348f97f19a3b1241f491 not found: ID does not exist" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.562720 4995 scope.go:117] "RemoveContainer" containerID="70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee" Jan 20 18:44:54 crc kubenswrapper[4995]: E0120 18:44:54.563211 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee\": container with ID starting with 70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee not found: ID does not exist" containerID="70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee" Jan 20 18:44:54 crc kubenswrapper[4995]: I0120 18:44:54.566724 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee"} err="failed to get container status \"70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee\": rpc error: code = NotFound desc = could not find container \"70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee\": container with ID starting with 70014a170c7d4d5807327401363ffd18c33617012feee5aceffed9cd5d2d1eee not found: ID does not exist" Jan 20 18:44:55 crc kubenswrapper[4995]: I0120 18:44:55.450251 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n6zb7" podUID="9a681626-4bff-4503-9380-b3fdb32b264a" containerName="registry-server" containerID="cri-o://b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e" gracePeriod=2 Jan 20 
Jan 20 18:44:55 crc kubenswrapper[4995]: I0120 18:44:55.933948 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n6zb7"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.009451 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p59vj\" (UniqueName: \"kubernetes.io/projected/9a681626-4bff-4503-9380-b3fdb32b264a-kube-api-access-p59vj\") pod \"9a681626-4bff-4503-9380-b3fdb32b264a\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") "
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.010099 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-catalog-content\") pod \"9a681626-4bff-4503-9380-b3fdb32b264a\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") "
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.011415 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" path="/var/lib/kubelet/pods/a2c471c6-839b-44c8-8ba4-06b5ac3e25c3/volumes"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.012364 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-utilities\") pod \"9a681626-4bff-4503-9380-b3fdb32b264a\" (UID: \"9a681626-4bff-4503-9380-b3fdb32b264a\") "
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.014610 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-utilities" (OuterVolumeSpecName: "utilities") pod "9a681626-4bff-4503-9380-b3fdb32b264a" (UID: "9a681626-4bff-4503-9380-b3fdb32b264a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.017140 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a681626-4bff-4503-9380-b3fdb32b264a-kube-api-access-p59vj" (OuterVolumeSpecName: "kube-api-access-p59vj") pod "9a681626-4bff-4503-9380-b3fdb32b264a" (UID: "9a681626-4bff-4503-9380-b3fdb32b264a"). InnerVolumeSpecName "kube-api-access-p59vj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.039269 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a681626-4bff-4503-9380-b3fdb32b264a" (UID: "9a681626-4bff-4503-9380-b3fdb32b264a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.114330 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p59vj\" (UniqueName: \"kubernetes.io/projected/9a681626-4bff-4503-9380-b3fdb32b264a-kube-api-access-p59vj\") on node \"crc\" DevicePath \"\""
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.114356 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.114365 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a681626-4bff-4503-9380-b3fdb32b264a-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.461437 4995 generic.go:334] "Generic (PLEG): container finished" podID="9a681626-4bff-4503-9380-b3fdb32b264a" containerID="b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e" exitCode=0
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.461514 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n6zb7" event={"ID":"9a681626-4bff-4503-9380-b3fdb32b264a","Type":"ContainerDied","Data":"b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e"}
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.461548 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n6zb7" event={"ID":"9a681626-4bff-4503-9380-b3fdb32b264a","Type":"ContainerDied","Data":"2f65d09f51f294539b6c49e690f7c9813bd0d5634207bb9d795fa07c50b6841f"}
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.461569 4995 scope.go:117] "RemoveContainer" containerID="b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.461692 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n6zb7"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.487561 4995 scope.go:117] "RemoveContainer" containerID="7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.512006 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n6zb7"]
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.525362 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n6zb7"]
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.527201 4995 scope.go:117] "RemoveContainer" containerID="48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.582276 4995 scope.go:117] "RemoveContainer" containerID="b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e"
Jan 20 18:44:56 crc kubenswrapper[4995]: E0120 18:44:56.583034 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e\": container with ID starting with b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e not found: ID does not exist" containerID="b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.583101 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e"} err="failed to get container status \"b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e\": rpc error: code = NotFound desc = could not find container \"b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e\": container with ID starting with b79517d2c18dde965aacdefec69bf744214bf89c77d2e7c64f039954a8e6e41e not found: ID does not exist"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.583134 4995 scope.go:117] "RemoveContainer" containerID="7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198"
Jan 20 18:44:56 crc kubenswrapper[4995]: E0120 18:44:56.583473 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198\": container with ID starting with 7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198 not found: ID does not exist" containerID="7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.583511 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198"} err="failed to get container status \"7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198\": rpc error: code = NotFound desc = could not find container \"7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198\": container with ID starting with 7e08e824fa3bddac9787fdb1ddff7b078bd770aef2953812573466c637b05198 not found: ID does not exist"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.583530 4995 scope.go:117] "RemoveContainer" containerID="48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019"
Jan 20 18:44:56 crc kubenswrapper[4995]: E0120 18:44:56.583756 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019\": container with ID starting with 48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019 not found: ID does not exist" containerID="48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.583783 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019"} err="failed to get container status \"48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019\": rpc error: code = NotFound desc = could not find container \"48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019\": container with ID starting with 48220bec1693c3bb7dfeeb23ee8700b9ccc18afaac9a81c38eb07b2f832df019 not found: ID does not exist"
Jan 20 18:44:56 crc kubenswrapper[4995]: I0120 18:44:56.990044 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796"
Jan 20 18:44:56 crc kubenswrapper[4995]: E0120 18:44:56.990570 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:44:58 crc kubenswrapper[4995]: I0120 18:44:58.002586 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a681626-4bff-4503-9380-b3fdb32b264a" path="/var/lib/kubelet/pods/9a681626-4bff-4503-9380-b3fdb32b264a/volumes"
CPUSet assignment" podUID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerName="extract-content" Jan 20 18:45:00 crc kubenswrapper[4995]: E0120 18:45:00.186660 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerName="registry-server" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.186666 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerName="registry-server" Jan 20 18:45:00 crc kubenswrapper[4995]: E0120 18:45:00.186673 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a681626-4bff-4503-9380-b3fdb32b264a" containerName="extract-utilities" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.186679 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a681626-4bff-4503-9380-b3fdb32b264a" containerName="extract-utilities" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.186853 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a681626-4bff-4503-9380-b3fdb32b264a" containerName="registry-server" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.186872 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2c471c6-839b-44c8-8ba4-06b5ac3e25c3" containerName="registry-server" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.187506 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.190940 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.195069 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.204986 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g"] Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.296522 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-config-volume\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.296605 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-secret-volume\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.296698 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5s4z\" (UniqueName: \"kubernetes.io/projected/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-kube-api-access-x5s4z\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.398539 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-x5s4z\" (UniqueName: \"kubernetes.io/projected/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-kube-api-access-x5s4z\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.398671 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-config-volume\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.398755 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-secret-volume\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.400103 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-config-volume\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.414153 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-secret-volume\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.419345 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5s4z\" (UniqueName: \"kubernetes.io/projected/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-kube-api-access-x5s4z\") pod \"collect-profiles-29482245-crh2g\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.515713 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:00 crc kubenswrapper[4995]: I0120 18:45:00.798287 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g"] Jan 20 18:45:01 crc kubenswrapper[4995]: I0120 18:45:01.510976 4995 generic.go:334] "Generic (PLEG): container finished" podID="74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" containerID="858cdad20efd289d631f892fba5428ad47cdeff051a611b69d8b6c60518bc237" exitCode=0 Jan 20 18:45:01 crc kubenswrapper[4995]: I0120 18:45:01.511033 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" event={"ID":"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6","Type":"ContainerDied","Data":"858cdad20efd289d631f892fba5428ad47cdeff051a611b69d8b6c60518bc237"} Jan 20 18:45:01 crc kubenswrapper[4995]: I0120 18:45:01.511301 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" event={"ID":"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6","Type":"ContainerStarted","Data":"eed204224c2da610d73ed39eca68a7a8c5dddf9f0cbfdc81c73845884bf1a0a7"} Jan 20 18:45:02 crc kubenswrapper[4995]: I0120 18:45:02.941658 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.070748 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-secret-volume\") pod \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.070805 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-config-volume\") pod \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.070951 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5s4z\" (UniqueName: \"kubernetes.io/projected/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-kube-api-access-x5s4z\") pod \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\" (UID: \"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6\") " Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.071954 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-config-volume" (OuterVolumeSpecName: "config-volume") pod "74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" (UID: "74bfccc6-a1cd-4a20-b387-e2b447bfc1c6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.080010 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-kube-api-access-x5s4z" (OuterVolumeSpecName: "kube-api-access-x5s4z") pod "74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" (UID: "74bfccc6-a1cd-4a20-b387-e2b447bfc1c6"). InnerVolumeSpecName "kube-api-access-x5s4z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.086338 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" (UID: "74bfccc6-a1cd-4a20-b387-e2b447bfc1c6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.173764 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5s4z\" (UniqueName: \"kubernetes.io/projected/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-kube-api-access-x5s4z\") on node \"crc\" DevicePath \"\"" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.173797 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.173806 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74bfccc6-a1cd-4a20-b387-e2b447bfc1c6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.543427 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" event={"ID":"74bfccc6-a1cd-4a20-b387-e2b447bfc1c6","Type":"ContainerDied","Data":"eed204224c2da610d73ed39eca68a7a8c5dddf9f0cbfdc81c73845884bf1a0a7"} Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.543461 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eed204224c2da610d73ed39eca68a7a8c5dddf9f0cbfdc81c73845884bf1a0a7" Jan 20 18:45:03 crc kubenswrapper[4995]: I0120 18:45:03.543515 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482245-crh2g" Jan 20 18:45:04 crc kubenswrapper[4995]: I0120 18:45:04.053236 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g"] Jan 20 18:45:04 crc kubenswrapper[4995]: I0120 18:45:04.063649 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482200-rww5g"] Jan 20 18:45:06 crc kubenswrapper[4995]: I0120 18:45:06.012352 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca1d336d-024b-413d-b07d-f04552656570" path="/var/lib/kubelet/pods/ca1d336d-024b-413d-b07d-f04552656570/volumes" Jan 20 18:45:08 crc kubenswrapper[4995]: I0120 18:45:08.989612 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:45:08 crc kubenswrapper[4995]: E0120 18:45:08.990649 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:45:23 crc kubenswrapper[4995]: I0120 18:45:23.990383 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:45:23 crc kubenswrapper[4995]: E0120 18:45:23.991062 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:45:37 crc kubenswrapper[4995]: I0120 18:45:37.077711 4995 scope.go:117] "RemoveContainer" containerID="b14a87a125e7e41247218d2a5a2f6d0821d94f21b585fed1eb9d1020cdee3b04" Jan 20 18:45:38 crc kubenswrapper[4995]: I0120 18:45:38.996621 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:45:38 crc kubenswrapper[4995]: E0120 18:45:38.997584 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.650668 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6xc4x"] Jan 20 18:45:46 crc kubenswrapper[4995]: E0120 18:45:46.651848 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" containerName="collect-profiles" Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.651863 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" containerName="collect-profiles" Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.652095 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="74bfccc6-a1cd-4a20-b387-e2b447bfc1c6" containerName="collect-profiles"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.653896 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.662500 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6xc4x"]
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.808130 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-catalog-content\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.808217 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6twfv\" (UniqueName: \"kubernetes.io/projected/163b0b49-77d5-4adf-a39b-2f127aca968b-kube-api-access-6twfv\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.808259 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-utilities\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.909871 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-utilities\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.910046 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-catalog-content\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.910175 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6twfv\" (UniqueName: \"kubernetes.io/projected/163b0b49-77d5-4adf-a39b-2f127aca968b-kube-api-access-6twfv\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.910638 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-utilities\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.910915 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-catalog-content\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.955003 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6twfv\" (UniqueName: \"kubernetes.io/projected/163b0b49-77d5-4adf-a39b-2f127aca968b-kube-api-access-6twfv\") pod \"community-operators-6xc4x\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:46 crc kubenswrapper[4995]: I0120 18:45:46.984999 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xc4x"
Jan 20 18:45:47 crc kubenswrapper[4995]: I0120 18:45:47.353821 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6xc4x"]
Jan 20 18:45:47 crc kubenswrapper[4995]: W0120 18:45:47.354626 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod163b0b49_77d5_4adf_a39b_2f127aca968b.slice/crio-969efae2ee99c7c096b9ae6945a6f6f1922595575ff4a1cb772aeb6d8994f45f WatchSource:0}: Error finding container 969efae2ee99c7c096b9ae6945a6f6f1922595575ff4a1cb772aeb6d8994f45f: Status 404 returned error can't find the container with id 969efae2ee99c7c096b9ae6945a6f6f1922595575ff4a1cb772aeb6d8994f45f
Jan 20 18:45:48 crc kubenswrapper[4995]: I0120 18:45:48.010430 4995 generic.go:334] "Generic (PLEG): container finished" podID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerID="a8c233a3fdac0d55be08a35d473331ba7059a7684cdeacf1f4a875644bddc936" exitCode=0
Jan 20 18:45:48 crc kubenswrapper[4995]: I0120 18:45:48.010693 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xc4x" event={"ID":"163b0b49-77d5-4adf-a39b-2f127aca968b","Type":"ContainerDied","Data":"a8c233a3fdac0d55be08a35d473331ba7059a7684cdeacf1f4a875644bddc936"}
Jan 20 18:45:48 crc kubenswrapper[4995]: I0120 18:45:48.010720 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xc4x" event={"ID":"163b0b49-77d5-4adf-a39b-2f127aca968b","Type":"ContainerStarted","Data":"969efae2ee99c7c096b9ae6945a6f6f1922595575ff4a1cb772aeb6d8994f45f"}
Jan 20 18:45:49 crc kubenswrapper[4995]: I0120 18:45:49.020538 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xc4x" event={"ID":"163b0b49-77d5-4adf-a39b-2f127aca968b","Type":"ContainerStarted","Data":"bd857678cb2347c4c2f882512765d38ae6a84b18be1ac02cad9fbeba8cb82d3e"}
Jan 20 18:45:50 crc kubenswrapper[4995]: I0120 18:45:50.044882 4995 generic.go:334] "Generic (PLEG): container finished" podID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerID="bd857678cb2347c4c2f882512765d38ae6a84b18be1ac02cad9fbeba8cb82d3e" exitCode=0
Jan 20 18:45:50 crc kubenswrapper[4995]: I0120 18:45:50.044929 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xc4x" event={"ID":"163b0b49-77d5-4adf-a39b-2f127aca968b","Type":"ContainerDied","Data":"bd857678cb2347c4c2f882512765d38ae6a84b18be1ac02cad9fbeba8cb82d3e"}
Jan 20 18:45:51 crc kubenswrapper[4995]: I0120 18:45:51.055419 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6xc4x" event={"ID":"163b0b49-77d5-4adf-a39b-2f127aca968b","Type":"ContainerStarted","Data":"03fe890a78f12e375fde9a4b9077e7d16fe8c47a3af5ccf7d91dbe4e4bdbeb02"}
Jan 20 18:45:51 crc kubenswrapper[4995]: I0120 18:45:51.075989 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6xc4x" podStartSLOduration=2.607426675 podStartE2EDuration="5.075973914s" podCreationTimestamp="2026-01-20 18:45:46 +0000 UTC" firstStartedPulling="2026-01-20 18:45:48.012113167 +0000 UTC m=+8066.256717973" lastFinishedPulling="2026-01-20 18:45:50.480660406 +0000 UTC m=+8068.725265212" observedRunningTime="2026-01-20 18:45:51.07360221 +0000 UTC m=+8069.318207016" watchObservedRunningTime="2026-01-20 18:45:51.075973914 +0000 UTC m=+8069.320578710"
event={"ID":"163b0b49-77d5-4adf-a39b-2f127aca968b","Type":"ContainerDied","Data":"969efae2ee99c7c096b9ae6945a6f6f1922595575ff4a1cb772aeb6d8994f45f"} Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.134997 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="969efae2ee99c7c096b9ae6945a6f6f1922595575ff4a1cb772aeb6d8994f45f" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.135738 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xc4x" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.208845 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-catalog-content\") pod \"163b0b49-77d5-4adf-a39b-2f127aca968b\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.208938 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6twfv\" (UniqueName: \"kubernetes.io/projected/163b0b49-77d5-4adf-a39b-2f127aca968b-kube-api-access-6twfv\") pod \"163b0b49-77d5-4adf-a39b-2f127aca968b\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.209167 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-utilities\") pod \"163b0b49-77d5-4adf-a39b-2f127aca968b\" (UID: \"163b0b49-77d5-4adf-a39b-2f127aca968b\") " Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.210347 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-utilities" (OuterVolumeSpecName: "utilities") pod "163b0b49-77d5-4adf-a39b-2f127aca968b" (UID: "163b0b49-77d5-4adf-a39b-2f127aca968b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.217096 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/163b0b49-77d5-4adf-a39b-2f127aca968b-kube-api-access-6twfv" (OuterVolumeSpecName: "kube-api-access-6twfv") pod "163b0b49-77d5-4adf-a39b-2f127aca968b" (UID: "163b0b49-77d5-4adf-a39b-2f127aca968b"). InnerVolumeSpecName "kube-api-access-6twfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.291849 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "163b0b49-77d5-4adf-a39b-2f127aca968b" (UID: "163b0b49-77d5-4adf-a39b-2f127aca968b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.311874 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.311907 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/163b0b49-77d5-4adf-a39b-2f127aca968b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:46:00 crc kubenswrapper[4995]: I0120 18:46:00.311923 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6twfv\" (UniqueName: \"kubernetes.io/projected/163b0b49-77d5-4adf-a39b-2f127aca968b-kube-api-access-6twfv\") on node \"crc\" DevicePath \"\"" Jan 20 18:46:01 crc kubenswrapper[4995]: I0120 18:46:01.142277 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6xc4x" Jan 20 18:46:01 crc kubenswrapper[4995]: I0120 18:46:01.175350 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6xc4x"] Jan 20 18:46:01 crc kubenswrapper[4995]: I0120 18:46:01.182759 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6xc4x"] Jan 20 18:46:02 crc kubenswrapper[4995]: I0120 18:46:02.011349 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" path="/var/lib/kubelet/pods/163b0b49-77d5-4adf-a39b-2f127aca968b/volumes" Jan 20 18:46:07 crc kubenswrapper[4995]: I0120 18:46:07.990131 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:46:09 crc kubenswrapper[4995]: I0120 18:46:09.236923 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"f054a88c349d4ba6283fbb84635cc78c46f3468be5ffe05565554df9f1a4bcdc"} Jan 20 18:48:30 crc kubenswrapper[4995]: I0120 18:48:30.572207 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:48:30 crc kubenswrapper[4995]: I0120 18:48:30.573174 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.181516 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"] Jan 20 18:48:37 crc kubenswrapper[4995]: E0120 18:48:37.182431 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="extract-utilities" Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.182446 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="extract-utilities" Jan 20 18:48:37 crc kubenswrapper[4995]: E0120 
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.181516 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"]
Jan 20 18:48:37 crc kubenswrapper[4995]: E0120 18:48:37.182431 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="extract-utilities"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.182446 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="extract-utilities"
Jan 20 18:48:37 crc kubenswrapper[4995]: E0120 18:48:37.182459 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="registry-server"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.182465 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="registry-server"
Jan 20 18:48:37 crc kubenswrapper[4995]: E0120 18:48:37.182495 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="extract-content"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.182501 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="extract-content"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.182684 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="163b0b49-77d5-4adf-a39b-2f127aca968b" containerName="registry-server"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.184017 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.196016 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"]
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.204790 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-catalog-content\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.204860 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4spp\" (UniqueName: \"kubernetes.io/projected/7d303d31-47bc-404d-9b43-9ab0dcc09125-kube-api-access-p4spp\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.204895 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-utilities\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.209532 4995 scope.go:117] "RemoveContainer" containerID="53b946962438b1dfc95ad08a73d1777a6de3f2c50b110ca6708c4e049701e11a"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.307100 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-catalog-content\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.307161 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4spp\" (UniqueName: \"kubernetes.io/projected/7d303d31-47bc-404d-9b43-9ab0dcc09125-kube-api-access-p4spp\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.307196 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-utilities\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.307604 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-catalog-content\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.307672 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-utilities\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.327706 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4spp\" (UniqueName: \"kubernetes.io/projected/7d303d31-47bc-404d-9b43-9ab0dcc09125-kube-api-access-p4spp\") pod \"redhat-operators-wvfm7\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:37 crc kubenswrapper[4995]: I0120 18:48:37.502580 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:38 crc kubenswrapper[4995]: I0120 18:48:38.072998 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"]
Jan 20 18:48:38 crc kubenswrapper[4995]: I0120 18:48:38.860033 4995 generic.go:334] "Generic (PLEG): container finished" podID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerID="cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc" exitCode=0
Jan 20 18:48:38 crc kubenswrapper[4995]: I0120 18:48:38.860156 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvfm7" event={"ID":"7d303d31-47bc-404d-9b43-9ab0dcc09125","Type":"ContainerDied","Data":"cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc"}
Jan 20 18:48:38 crc kubenswrapper[4995]: I0120 18:48:38.860327 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvfm7" event={"ID":"7d303d31-47bc-404d-9b43-9ab0dcc09125","Type":"ContainerStarted","Data":"17784ed093bec05ba90e2b10e2946fedaad59ee77a3ce23651ad96d7652f25aa"}
Jan 20 18:48:40 crc kubenswrapper[4995]: I0120 18:48:40.917121 4995 generic.go:334] "Generic (PLEG): container finished" podID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerID="ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb" exitCode=0
Jan 20 18:48:40 crc kubenswrapper[4995]: I0120 18:48:40.917170 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvfm7" event={"ID":"7d303d31-47bc-404d-9b43-9ab0dcc09125","Type":"ContainerDied","Data":"ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb"}
Jan 20 18:48:41 crc kubenswrapper[4995]: I0120 18:48:41.936614 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvfm7" event={"ID":"7d303d31-47bc-404d-9b43-9ab0dcc09125","Type":"ContainerStarted","Data":"db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1"}
Jan 20 18:48:41 crc kubenswrapper[4995]: I0120 18:48:41.969759 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wvfm7" podStartSLOduration=2.338303613 podStartE2EDuration="4.969729093s" podCreationTimestamp="2026-01-20 18:48:37 +0000 UTC" firstStartedPulling="2026-01-20 18:48:38.862969172 +0000 UTC m=+8237.107573978" lastFinishedPulling="2026-01-20 18:48:41.494394632 +0000 UTC m=+8239.738999458" observedRunningTime="2026-01-20 18:48:41.956854957 +0000 UTC m=+8240.201459783" watchObservedRunningTime="2026-01-20 18:48:41.969729093 +0000 UTC m=+8240.214333899"
Jan 20 18:48:47 crc kubenswrapper[4995]: I0120 18:48:47.502826 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:47 crc kubenswrapper[4995]: I0120 18:48:47.503521 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:48 crc kubenswrapper[4995]: I0120 18:48:48.564503 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wvfm7" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="registry-server" probeResult="failure" output=<
Jan 20 18:48:48 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s
Jan 20 18:48:48 crc kubenswrapper[4995]: >
Jan 20 18:48:57 crc kubenswrapper[4995]: I0120 18:48:57.555807 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:57 crc kubenswrapper[4995]: I0120 18:48:57.609780 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wvfm7"
Jan 20 18:48:57 crc kubenswrapper[4995]: I0120 18:48:57.806283 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"]
Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.112402 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wvfm7" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="registry-server" containerID="cri-o://db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1" gracePeriod=2
Need to start a new one" pod="openshift-marketplace/redhat-operators-wvfm7" Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.703845 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4spp\" (UniqueName: \"kubernetes.io/projected/7d303d31-47bc-404d-9b43-9ab0dcc09125-kube-api-access-p4spp\") pod \"7d303d31-47bc-404d-9b43-9ab0dcc09125\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.704018 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-catalog-content\") pod \"7d303d31-47bc-404d-9b43-9ab0dcc09125\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.704112 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-utilities\") pod \"7d303d31-47bc-404d-9b43-9ab0dcc09125\" (UID: \"7d303d31-47bc-404d-9b43-9ab0dcc09125\") " Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.705152 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-utilities" (OuterVolumeSpecName: "utilities") pod "7d303d31-47bc-404d-9b43-9ab0dcc09125" (UID: "7d303d31-47bc-404d-9b43-9ab0dcc09125"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.709818 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d303d31-47bc-404d-9b43-9ab0dcc09125-kube-api-access-p4spp" (OuterVolumeSpecName: "kube-api-access-p4spp") pod "7d303d31-47bc-404d-9b43-9ab0dcc09125" (UID: "7d303d31-47bc-404d-9b43-9ab0dcc09125"). InnerVolumeSpecName "kube-api-access-p4spp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.806551 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.806588 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4spp\" (UniqueName: \"kubernetes.io/projected/7d303d31-47bc-404d-9b43-9ab0dcc09125-kube-api-access-p4spp\") on node \"crc\" DevicePath \"\"" Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.828465 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d303d31-47bc-404d-9b43-9ab0dcc09125" (UID: "7d303d31-47bc-404d-9b43-9ab0dcc09125"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:48:59 crc kubenswrapper[4995]: I0120 18:48:59.908135 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d303d31-47bc-404d-9b43-9ab0dcc09125-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.123527 4995 generic.go:334] "Generic (PLEG): container finished" podID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerID="db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1" exitCode=0 Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.123573 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wvfm7" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.123590 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvfm7" event={"ID":"7d303d31-47bc-404d-9b43-9ab0dcc09125","Type":"ContainerDied","Data":"db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1"} Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.123633 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wvfm7" event={"ID":"7d303d31-47bc-404d-9b43-9ab0dcc09125","Type":"ContainerDied","Data":"17784ed093bec05ba90e2b10e2946fedaad59ee77a3ce23651ad96d7652f25aa"} Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.123655 4995 scope.go:117] "RemoveContainer" containerID="db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.145536 4995 scope.go:117] "RemoveContainer" containerID="ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.153735 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"] Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.170327 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wvfm7"] Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.181651 4995 scope.go:117] "RemoveContainer" containerID="cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.220492 4995 scope.go:117] "RemoveContainer" containerID="db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1" Jan 20 18:49:00 crc kubenswrapper[4995]: E0120 18:49:00.225574 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1\": container with ID starting with db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1 not found: ID does not exist" containerID="db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.225644 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1"} err="failed to get container status \"db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1\": rpc error: code = NotFound desc = could not find container \"db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1\": container with ID starting with db64845305fe6ce5d4cc69391e8f41c592b8c76a4088c6cc6aa7d9a3b8b170a1 not found: ID does not exist" Jan 20 18:49:00 crc 
kubenswrapper[4995]: I0120 18:49:00.225687 4995 scope.go:117] "RemoveContainer" containerID="ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb" Jan 20 18:49:00 crc kubenswrapper[4995]: E0120 18:49:00.229369 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb\": container with ID starting with ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb not found: ID does not exist" containerID="ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.229571 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb"} err="failed to get container status \"ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb\": rpc error: code = NotFound desc = could not find container \"ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb\": container with ID starting with ebb8adffa540d63f052406fb1a58000f9c1ad5a31a3272c19a08babfeda2fdbb not found: ID does not exist" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.229708 4995 scope.go:117] "RemoveContainer" containerID="cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc" Jan 20 18:49:00 crc kubenswrapper[4995]: E0120 18:49:00.230279 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc\": container with ID starting with cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc not found: ID does not exist" containerID="cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.230517 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc"} err="failed to get container status \"cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc\": rpc error: code = NotFound desc = could not find container \"cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc\": container with ID starting with cf8cec7581aa16eb27c78dfa20725c3a315981153e74c04b6b92e6ab1f6113fc not found: ID does not exist" Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.572039 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:49:00 crc kubenswrapper[4995]: I0120 18:49:00.572429 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:49:02 crc kubenswrapper[4995]: I0120 18:49:02.001385 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" path="/var/lib/kubelet/pods/7d303d31-47bc-404d-9b43-9ab0dcc09125/volumes" Jan 20 18:49:30 crc kubenswrapper[4995]: I0120 18:49:30.571882 4995 patch_prober.go:28] interesting 
pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:49:30 crc kubenswrapper[4995]: I0120 18:49:30.572614 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:49:30 crc kubenswrapper[4995]: I0120 18:49:30.572678 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:49:30 crc kubenswrapper[4995]: I0120 18:49:30.573866 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f054a88c349d4ba6283fbb84635cc78c46f3468be5ffe05565554df9f1a4bcdc"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:49:30 crc kubenswrapper[4995]: I0120 18:49:30.574002 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://f054a88c349d4ba6283fbb84635cc78c46f3468be5ffe05565554df9f1a4bcdc" gracePeriod=600 Jan 20 18:49:31 crc kubenswrapper[4995]: I0120 18:49:31.442957 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="f054a88c349d4ba6283fbb84635cc78c46f3468be5ffe05565554df9f1a4bcdc" exitCode=0 Jan 20 18:49:31 crc kubenswrapper[4995]: I0120 18:49:31.443006 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"f054a88c349d4ba6283fbb84635cc78c46f3468be5ffe05565554df9f1a4bcdc"} Jan 20 18:49:31 crc kubenswrapper[4995]: I0120 18:49:31.443595 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"} Jan 20 18:49:31 crc kubenswrapper[4995]: I0120 18:49:31.443640 4995 scope.go:117] "RemoveContainer" containerID="1e6e4079a84b919b9f2218b14eae15dbd95d526ae115b4741572707de67d0796" Jan 20 18:51:30 crc kubenswrapper[4995]: I0120 18:51:30.571875 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:51:30 crc kubenswrapper[4995]: I0120 18:51:30.572553 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:52:00 
crc kubenswrapper[4995]: I0120 18:52:00.571243 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:52:00 crc kubenswrapper[4995]: I0120 18:52:00.571835 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:52:30 crc kubenswrapper[4995]: I0120 18:52:30.572751 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 18:52:30 crc kubenswrapper[4995]: I0120 18:52:30.573941 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 18:52:30 crc kubenswrapper[4995]: I0120 18:52:30.574093 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 18:52:30 crc kubenswrapper[4995]: I0120 18:52:30.575574 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 18:52:30 crc kubenswrapper[4995]: I0120 18:52:30.575672 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" gracePeriod=600 Jan 20 18:52:30 crc kubenswrapper[4995]: E0120 18:52:30.702978 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:52:31 crc kubenswrapper[4995]: I0120 18:52:31.429487 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" exitCode=0 Jan 20 18:52:31 crc kubenswrapper[4995]: I0120 18:52:31.429531 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"} Jan 20 18:52:31 crc kubenswrapper[4995]: I0120 18:52:31.429841 4995 scope.go:117] "RemoveContainer" containerID="f054a88c349d4ba6283fbb84635cc78c46f3468be5ffe05565554df9f1a4bcdc" Jan 20 18:52:31 crc kubenswrapper[4995]: I0120 18:52:31.430658 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:52:31 crc kubenswrapper[4995]: E0120 18:52:31.431038 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:52:37 crc kubenswrapper[4995]: I0120 18:52:37.363859 4995 scope.go:117] "RemoveContainer" containerID="a8c233a3fdac0d55be08a35d473331ba7059a7684cdeacf1f4a875644bddc936" Jan 20 18:52:37 crc kubenswrapper[4995]: I0120 18:52:37.390977 4995 scope.go:117] "RemoveContainer" containerID="03fe890a78f12e375fde9a4b9077e7d16fe8c47a3af5ccf7d91dbe4e4bdbeb02" Jan 20 18:52:37 crc kubenswrapper[4995]: I0120 18:52:37.438636 4995 scope.go:117] "RemoveContainer" containerID="bd857678cb2347c4c2f882512765d38ae6a84b18be1ac02cad9fbeba8cb82d3e" Jan 20 18:52:44 crc kubenswrapper[4995]: I0120 18:52:44.990089 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:52:44 crc kubenswrapper[4995]: E0120 18:52:44.990852 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:52:56 crc kubenswrapper[4995]: I0120 18:52:56.989601 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:52:56 crc kubenswrapper[4995]: E0120 18:52:56.990337 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:53:12 crc kubenswrapper[4995]: I0120 18:53:12.006385 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:53:12 crc kubenswrapper[4995]: E0120 18:53:12.007241 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" 
Jan 20 18:53:24 crc kubenswrapper[4995]: I0120 18:53:24.992438 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:53:24 crc kubenswrapper[4995]: E0120 18:53:24.994421 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:53:36 crc kubenswrapper[4995]: I0120 18:53:36.989996 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:53:36 crc kubenswrapper[4995]: E0120 18:53:36.990871 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:53:49 crc kubenswrapper[4995]: I0120 18:53:49.990884 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:53:49 crc kubenswrapper[4995]: E0120 18:53:49.991565 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:54:04 crc kubenswrapper[4995]: I0120 18:54:04.989732 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:54:04 crc kubenswrapper[4995]: E0120 18:54:04.990518 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:54:19 crc kubenswrapper[4995]: I0120 18:54:19.989905 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:54:19 crc kubenswrapper[4995]: E0120 18:54:19.991822 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:54:32 crc kubenswrapper[4995]: I0120 18:54:32.002856 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:54:32 crc kubenswrapper[4995]: E0120 18:54:32.003719 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:54:42 crc kubenswrapper[4995]: I0120 18:54:42.989724 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:54:42 crc kubenswrapper[4995]: E0120 18:54:42.990491 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:54:57 crc kubenswrapper[4995]: I0120 18:54:57.990669 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:54:57 crc kubenswrapper[4995]: E0120 18:54:57.991570 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:55:11 crc kubenswrapper[4995]: I0120 18:55:11.989553 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:55:11 crc kubenswrapper[4995]: E0120 18:55:11.990649 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:55:24 crc kubenswrapper[4995]: I0120 18:55:24.989239 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:55:24 crc kubenswrapper[4995]: E0120 18:55:24.990117 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.015209 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-747wl"]
Jan 20 18:55:35 crc kubenswrapper[4995]: E0120 18:55:35.016455 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="extract-utilities"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.016471 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="extract-utilities"
Jan 20 18:55:35 crc kubenswrapper[4995]: E0120 18:55:35.016495 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="extract-content"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.016502 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="extract-content"
Jan 20 18:55:35 crc kubenswrapper[4995]: E0120 18:55:35.016536 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="registry-server"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.016544 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="registry-server"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.018232 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d303d31-47bc-404d-9b43-9ab0dcc09125" containerName="registry-server"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.026762 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.064203 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-747wl"]
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.125391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-catalog-content\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.125480 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkdwg\" (UniqueName: \"kubernetes.io/projected/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-kube-api-access-mkdwg\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.125524 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-utilities\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.226708 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-utilities\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.226893 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-catalog-content\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.226925 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkdwg\" (UniqueName: \"kubernetes.io/projected/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-kube-api-access-mkdwg\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.227665 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-catalog-content\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.227930 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-utilities\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.246371 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkdwg\" (UniqueName: \"kubernetes.io/projected/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-kube-api-access-mkdwg\") pod \"certified-operators-747wl\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") " pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.373368 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:35 crc kubenswrapper[4995]: I0120 18:55:35.799966 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-747wl"]
Jan 20 18:55:36 crc kubenswrapper[4995]: E0120 18:55:36.172773 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5dc2604e_4fc0_4f5a_9fb6_53eb29338159.slice/crio-conmon-46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5dc2604e_4fc0_4f5a_9fb6_53eb29338159.slice/crio-46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190.scope\": RecentStats: unable to find data in memory cache]"
Jan 20 18:55:36 crc kubenswrapper[4995]: I0120 18:55:36.275905 4995 generic.go:334] "Generic (PLEG): container finished" podID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerID="46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190" exitCode=0
Jan 20 18:55:36 crc kubenswrapper[4995]: I0120 18:55:36.275953 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerDied","Data":"46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190"}
Jan 20 18:55:36 crc kubenswrapper[4995]: I0120 18:55:36.275978 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerStarted","Data":"303a0657dc9d104aa83ed87d67133474e1db30f5dd6ac29588a840f2dc043ba0"}
Jan 20 18:55:36 crc kubenswrapper[4995]: I0120 18:55:36.278492 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 20 18:55:37 crc kubenswrapper[4995]: I0120 18:55:37.287918 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerStarted","Data":"5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71"}
Jan 20 18:55:38 crc kubenswrapper[4995]: I0120 18:55:38.299029 4995 generic.go:334] "Generic (PLEG): container finished" podID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerID="5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71" exitCode=0
Jan 20 18:55:38 crc kubenswrapper[4995]: I0120 18:55:38.299073 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerDied","Data":"5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71"}
Jan 20 18:55:39 crc kubenswrapper[4995]: I0120 18:55:39.313106 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerStarted","Data":"018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3"}
Jan 20 18:55:39 crc kubenswrapper[4995]: I0120 18:55:39.338115 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-747wl" podStartSLOduration=2.727856687 podStartE2EDuration="5.338093658s" podCreationTimestamp="2026-01-20 18:55:34 +0000 UTC" firstStartedPulling="2026-01-20 18:55:36.278036065 +0000 UTC m=+8654.522640901" lastFinishedPulling="2026-01-20 18:55:38.888273066 +0000 UTC m=+8657.132877872" observedRunningTime="2026-01-20 18:55:39.334055929 +0000 UTC m=+8657.578660735" watchObservedRunningTime="2026-01-20 18:55:39.338093658 +0000 UTC m=+8657.582698464"
Jan 20 18:55:39 crc kubenswrapper[4995]: I0120 18:55:39.989554 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:55:39 crc kubenswrapper[4995]: E0120 18:55:39.989884 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:55:45 crc kubenswrapper[4995]: I0120 18:55:45.374535 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:45 crc kubenswrapper[4995]: I0120 18:55:45.375618 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:45 crc kubenswrapper[4995]: I0120 18:55:45.419118 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:46 crc kubenswrapper[4995]: I0120 18:55:46.431208 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:46 crc kubenswrapper[4995]: I0120 18:55:46.504737 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-747wl"]
Jan 20 18:55:48 crc kubenswrapper[4995]: I0120 18:55:48.392935 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-747wl" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="registry-server" containerID="cri-o://018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3" gracePeriod=2
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.381058 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.424067 4995 generic.go:334] "Generic (PLEG): container finished" podID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerID="018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3" exitCode=0
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.424397 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerDied","Data":"018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3"}
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.424424 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-747wl" event={"ID":"5dc2604e-4fc0-4f5a-9fb6-53eb29338159","Type":"ContainerDied","Data":"303a0657dc9d104aa83ed87d67133474e1db30f5dd6ac29588a840f2dc043ba0"}
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.424441 4995 scope.go:117] "RemoveContainer" containerID="018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.424587 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-747wl"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.462855 4995 scope.go:117] "RemoveContainer" containerID="5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.496549 4995 scope.go:117] "RemoveContainer" containerID="46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.515238 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-utilities\") pod \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") "
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.515321 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-catalog-content\") pod \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") "
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.515363 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkdwg\" (UniqueName: \"kubernetes.io/projected/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-kube-api-access-mkdwg\") pod \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\" (UID: \"5dc2604e-4fc0-4f5a-9fb6-53eb29338159\") "
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.517164 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-utilities" (OuterVolumeSpecName: "utilities") pod "5dc2604e-4fc0-4f5a-9fb6-53eb29338159" (UID: "5dc2604e-4fc0-4f5a-9fb6-53eb29338159"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.529180 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-kube-api-access-mkdwg" (OuterVolumeSpecName: "kube-api-access-mkdwg") pod "5dc2604e-4fc0-4f5a-9fb6-53eb29338159" (UID: "5dc2604e-4fc0-4f5a-9fb6-53eb29338159"). InnerVolumeSpecName "kube-api-access-mkdwg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.544628 4995 scope.go:117] "RemoveContainer" containerID="018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3"
Jan 20 18:55:49 crc kubenswrapper[4995]: E0120 18:55:49.545455 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3\": container with ID starting with 018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3 not found: ID does not exist" containerID="018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.545507 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3"} err="failed to get container status \"018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3\": rpc error: code = NotFound desc = could not find container \"018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3\": container with ID starting with 018ae2e10b384cfca0278c02a7e5f96862d9fda1cd72c554d1e8ac3fab27b0e3 not found: ID does not exist"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.545537 4995 scope.go:117] "RemoveContainer" containerID="5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71"
Jan 20 18:55:49 crc kubenswrapper[4995]: E0120 18:55:49.545986 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71\": container with ID starting with 5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71 not found: ID does not exist" containerID="5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.546014 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71"} err="failed to get container status \"5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71\": rpc error: code = NotFound desc = could not find container \"5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71\": container with ID starting with 5157f4d6993771e36045d80ffdc824a83b8ac27b7780e51cfc4532af31584c71 not found: ID does not exist"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.546031 4995 scope.go:117] "RemoveContainer" containerID="46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190"
Jan 20 18:55:49 crc kubenswrapper[4995]: E0120 18:55:49.548319 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190\": container with ID starting with 46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190 not found: ID does not exist" containerID="46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.548374 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190"} err="failed to get container status \"46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190\": rpc error: code = NotFound desc = could not find container \"46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190\": container with ID starting with 46ab10e3f999a57337d3ccadf2a44002ac6be3fd67117bc01c7fcaed97120190 not found: ID does not exist"
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.564608 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5dc2604e-4fc0-4f5a-9fb6-53eb29338159" (UID: "5dc2604e-4fc0-4f5a-9fb6-53eb29338159"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.617184 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.617226 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.617239 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkdwg\" (UniqueName: \"kubernetes.io/projected/5dc2604e-4fc0-4f5a-9fb6-53eb29338159-kube-api-access-mkdwg\") on node \"crc\" DevicePath \"\""
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.776609 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-747wl"]
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.816247 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-747wl"]
Jan 20 18:55:49 crc kubenswrapper[4995]: I0120 18:55:49.999845 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" path="/var/lib/kubelet/pods/5dc2604e-4fc0-4f5a-9fb6-53eb29338159/volumes"
Jan 20 18:55:53 crc kubenswrapper[4995]: I0120 18:55:53.990119 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:55:53 crc kubenswrapper[4995]: E0120 18:55:53.990847 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:56:08 crc kubenswrapper[4995]: I0120 18:56:08.989065 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:56:08 crc kubenswrapper[4995]: E0120 18:56:08.990992 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.955812 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vvjzm"]
Jan 20 18:56:19 crc kubenswrapper[4995]: E0120 18:56:19.957854 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="extract-utilities"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.957960 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="extract-utilities"
Jan 20 18:56:19 crc kubenswrapper[4995]: E0120 18:56:19.958060 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="extract-content"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.958172 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="extract-content"
Jan 20 18:56:19 crc kubenswrapper[4995]: E0120 18:56:19.958277 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="registry-server"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.958338 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="registry-server"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.958640 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dc2604e-4fc0-4f5a-9fb6-53eb29338159" containerName="registry-server"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.960314 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.986802 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vvjzm"]
Jan 20 18:56:19 crc kubenswrapper[4995]: I0120 18:56:19.990523 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 18:56:19 crc kubenswrapper[4995]: E0120 18:56:19.991970 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.055951 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-catalog-content\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.056234 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bfvq\" (UniqueName: \"kubernetes.io/projected/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-kube-api-access-8bfvq\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.056269 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-utilities\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.157955 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bfvq\" (UniqueName: \"kubernetes.io/projected/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-kube-api-access-8bfvq\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.158350 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-utilities\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.158503 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-catalog-content\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.159620 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-utilities\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.159778 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-catalog-content\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.189589 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bfvq\" (UniqueName: \"kubernetes.io/projected/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-kube-api-access-8bfvq\") pod \"community-operators-vvjzm\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") " pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.280447 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:20 crc kubenswrapper[4995]: I0120 18:56:20.899916 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vvjzm"]
Jan 20 18:56:21 crc kubenswrapper[4995]: I0120 18:56:21.741126 4995 generic.go:334] "Generic (PLEG): container finished" podID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerID="2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554" exitCode=0
Jan 20 18:56:21 crc kubenswrapper[4995]: I0120 18:56:21.741170 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vvjzm" event={"ID":"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b","Type":"ContainerDied","Data":"2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554"}
Jan 20 18:56:21 crc kubenswrapper[4995]: I0120 18:56:21.741205 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vvjzm" event={"ID":"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b","Type":"ContainerStarted","Data":"aee366982e7dc60dedfb312cf9ca0ea26d7c1ca98a24d5298cbf291b3a126b5c"}
Jan 20 18:56:23 crc kubenswrapper[4995]: I0120 18:56:23.777180 4995 generic.go:334] "Generic (PLEG): container finished" podID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerID="e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2" exitCode=0
Jan 20 18:56:23 crc kubenswrapper[4995]: I0120 18:56:23.777391 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vvjzm" event={"ID":"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b","Type":"ContainerDied","Data":"e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2"}
Jan 20 18:56:24 crc kubenswrapper[4995]: I0120 18:56:24.791786 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vvjzm" event={"ID":"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b","Type":"ContainerStarted","Data":"966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564"}
Jan 20 18:56:24 crc kubenswrapper[4995]: I0120 18:56:24.833529 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vvjzm" podStartSLOduration=3.163866815 podStartE2EDuration="5.833501858s" podCreationTimestamp="2026-01-20 18:56:19 +0000 UTC" firstStartedPulling="2026-01-20 18:56:21.742701922 +0000 UTC m=+8699.987306728" lastFinishedPulling="2026-01-20 18:56:24.412336975 +0000 UTC m=+8702.656941771" observedRunningTime="2026-01-20 18:56:24.821754503 +0000 UTC m=+8703.066359309" watchObservedRunningTime="2026-01-20 18:56:24.833501858 +0000 UTC m=+8703.078106664"
Jan 20 18:56:30 crc kubenswrapper[4995]: I0120 18:56:30.280993 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:30 crc kubenswrapper[4995]: I0120 18:56:30.281654 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:30 crc kubenswrapper[4995]: I0120 18:56:30.339682 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:30 crc kubenswrapper[4995]: I0120 18:56:30.904003 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:30 crc kubenswrapper[4995]: I0120 18:56:30.956625 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vvjzm"]
Jan 20 18:56:32 crc kubenswrapper[4995]: I0120 18:56:32.870158 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vvjzm" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="registry-server" containerID="cri-o://966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564" gracePeriod=2
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.767655 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.883914 4995 generic.go:334] "Generic (PLEG): container finished" podID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerID="966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564" exitCode=0
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.883967 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vvjzm" event={"ID":"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b","Type":"ContainerDied","Data":"966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564"}
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.883998 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vvjzm" event={"ID":"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b","Type":"ContainerDied","Data":"aee366982e7dc60dedfb312cf9ca0ea26d7c1ca98a24d5298cbf291b3a126b5c"}
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.884016 4995 scope.go:117] "RemoveContainer" containerID="966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564"
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.884195 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vvjzm"
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.904576 4995 scope.go:117] "RemoveContainer" containerID="e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2"
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.924052 4995 scope.go:117] "RemoveContainer" containerID="2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554"
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.950146 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-utilities\") pod \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") "
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.950298 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-catalog-content\") pod \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") "
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.950445 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bfvq\" (UniqueName: \"kubernetes.io/projected/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-kube-api-access-8bfvq\") pod \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\" (UID: \"39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b\") "
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.951436 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-utilities" (OuterVolumeSpecName: "utilities") pod "39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" (UID: "39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 18:56:33 crc kubenswrapper[4995]: I0120 18:56:33.956346 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-kube-api-access-8bfvq" (OuterVolumeSpecName: "kube-api-access-8bfvq") pod "39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" (UID: "39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b"). InnerVolumeSpecName "kube-api-access-8bfvq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.022472 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" (UID: "39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.031280 4995 scope.go:117] "RemoveContainer" containerID="966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564" Jan 20 18:56:34 crc kubenswrapper[4995]: E0120 18:56:34.031835 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564\": container with ID starting with 966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564 not found: ID does not exist" containerID="966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.031877 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564"} err="failed to get container status \"966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564\": rpc error: code = NotFound desc = could not find container \"966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564\": container with ID starting with 966843a8ab9a08ac86101966e118a337c80e650d8eb0071e43709163fec73564 not found: ID does not exist" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.031903 4995 scope.go:117] "RemoveContainer" containerID="e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2" Jan 20 18:56:34 crc kubenswrapper[4995]: E0120 18:56:34.032136 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2\": container with ID starting with e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2 not found: ID does not exist" containerID="e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.032185 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2"} err="failed to get container status \"e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2\": rpc error: code = NotFound desc = could not find container \"e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2\": container with ID starting with e47ec863a8738ce91c8a6fb6e1da8e3c5c2c2c5365a891187805b74ef47589e2 not found: ID does not exist" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.032200 4995 scope.go:117] "RemoveContainer" containerID="2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554" Jan 20 18:56:34 crc kubenswrapper[4995]: E0120 18:56:34.032464 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554\": container with ID starting with 2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554 not found: ID does not exist" containerID="2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.032517 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554"} err="failed to get container status \"2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554\": rpc error: code = NotFound desc = could not 
find container \"2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554\": container with ID starting with 2084a20e7db831df4470202e08d4c0c7eec9626570f661a6b9bc63eeb7a3f554 not found: ID does not exist" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.053475 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.053509 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bfvq\" (UniqueName: \"kubernetes.io/projected/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-kube-api-access-8bfvq\") on node \"crc\" DevicePath \"\"" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.053521 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.218068 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vvjzm"] Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.228417 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vvjzm"] Jan 20 18:56:34 crc kubenswrapper[4995]: I0120 18:56:34.990435 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:56:34 crc kubenswrapper[4995]: E0120 18:56:34.990664 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:56:36 crc kubenswrapper[4995]: I0120 18:56:36.007300 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" path="/var/lib/kubelet/pods/39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b/volumes" Jan 20 18:56:49 crc kubenswrapper[4995]: I0120 18:56:49.989553 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:56:49 crc kubenswrapper[4995]: E0120 18:56:49.990315 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:57:02 crc kubenswrapper[4995]: I0120 18:57:02.989507 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:57:02 crc kubenswrapper[4995]: E0120 18:57:02.990338 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:57:16 crc kubenswrapper[4995]: I0120 18:57:16.989060 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:57:16 crc kubenswrapper[4995]: E0120 18:57:16.989838 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:57:28 crc kubenswrapper[4995]: I0120 18:57:28.989397 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:57:28 crc kubenswrapper[4995]: E0120 18:57:28.990260 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 18:57:42 crc kubenswrapper[4995]: I0120 18:57:42.990135 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5" Jan 20 18:57:43 crc kubenswrapper[4995]: I0120 18:57:43.517273 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"d5c046335aa3aae890b29e903d9fa31b1230c10272583ff8b82e95dc683ff2a9"} Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.650893 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qnltt"] Jan 20 18:59:52 crc kubenswrapper[4995]: E0120 18:59:52.651999 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="extract-content" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.652019 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="extract-content" Jan 20 18:59:52 crc kubenswrapper[4995]: E0120 18:59:52.652053 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="registry-server" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.652062 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="registry-server" Jan 20 18:59:52 crc kubenswrapper[4995]: E0120 18:59:52.652091 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="extract-utilities" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.652100 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="extract-utilities" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.652361 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="39ab7d49-d9dd-4ed2-99f4-e9b70db8be1b" containerName="registry-server" Jan 20 
18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.663159 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.678599 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qnltt"] Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.757201 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2lhd\" (UniqueName: \"kubernetes.io/projected/f6ce63e5-1071-4f2d-922c-bc0930cdf172-kube-api-access-v2lhd\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.757346 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-catalog-content\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.757391 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-utilities\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.858810 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2lhd\" (UniqueName: \"kubernetes.io/projected/f6ce63e5-1071-4f2d-922c-bc0930cdf172-kube-api-access-v2lhd\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.858997 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-catalog-content\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.859249 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-utilities\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.859728 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-catalog-content\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: I0120 18:59:52.859854 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-utilities\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:52 crc kubenswrapper[4995]: 
I0120 18:59:52.887558 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2lhd\" (UniqueName: \"kubernetes.io/projected/f6ce63e5-1071-4f2d-922c-bc0930cdf172-kube-api-access-v2lhd\") pod \"redhat-operators-qnltt\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:53 crc kubenswrapper[4995]: I0120 18:59:53.035730 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 18:59:54 crc kubenswrapper[4995]: I0120 18:59:54.178806 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qnltt"] Jan 20 18:59:54 crc kubenswrapper[4995]: I0120 18:59:54.894723 4995 generic.go:334] "Generic (PLEG): container finished" podID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerID="15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2" exitCode=0 Jan 20 18:59:54 crc kubenswrapper[4995]: I0120 18:59:54.895115 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnltt" event={"ID":"f6ce63e5-1071-4f2d-922c-bc0930cdf172","Type":"ContainerDied","Data":"15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2"} Jan 20 18:59:54 crc kubenswrapper[4995]: I0120 18:59:54.895158 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnltt" event={"ID":"f6ce63e5-1071-4f2d-922c-bc0930cdf172","Type":"ContainerStarted","Data":"5fca4a781d5c49c4289da10bd71dc83cf8800a5902a2e053d1f60b6a0702240d"} Jan 20 18:59:56 crc kubenswrapper[4995]: I0120 18:59:56.919924 4995 generic.go:334] "Generic (PLEG): container finished" podID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerID="b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730" exitCode=0 Jan 20 18:59:56 crc kubenswrapper[4995]: I0120 18:59:56.920002 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnltt" event={"ID":"f6ce63e5-1071-4f2d-922c-bc0930cdf172","Type":"ContainerDied","Data":"b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730"} Jan 20 18:59:57 crc kubenswrapper[4995]: I0120 18:59:57.931921 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnltt" event={"ID":"f6ce63e5-1071-4f2d-922c-bc0930cdf172","Type":"ContainerStarted","Data":"80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263"} Jan 20 18:59:57 crc kubenswrapper[4995]: I0120 18:59:57.962847 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qnltt" podStartSLOduration=3.484521101 podStartE2EDuration="5.962825078s" podCreationTimestamp="2026-01-20 18:59:52 +0000 UTC" firstStartedPulling="2026-01-20 18:59:54.89661907 +0000 UTC m=+8913.141223876" lastFinishedPulling="2026-01-20 18:59:57.374923047 +0000 UTC m=+8915.619527853" observedRunningTime="2026-01-20 18:59:57.953319904 +0000 UTC m=+8916.197924710" watchObservedRunningTime="2026-01-20 18:59:57.962825078 +0000 UTC m=+8916.207429884" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.160986 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh"] Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.163091 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.165142 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.165228 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.181105 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh"] Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.338382 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd08b35a-badd-4fce-92f6-17d1344892d7-secret-volume\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.338459 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfbcg\" (UniqueName: \"kubernetes.io/projected/dd08b35a-badd-4fce-92f6-17d1344892d7-kube-api-access-lfbcg\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.338520 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd08b35a-badd-4fce-92f6-17d1344892d7-config-volume\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.440581 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd08b35a-badd-4fce-92f6-17d1344892d7-secret-volume\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.440896 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfbcg\" (UniqueName: \"kubernetes.io/projected/dd08b35a-badd-4fce-92f6-17d1344892d7-kube-api-access-lfbcg\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.441041 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd08b35a-badd-4fce-92f6-17d1344892d7-config-volume\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.442441 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd08b35a-badd-4fce-92f6-17d1344892d7-config-volume\") pod 
\"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.447467 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd08b35a-badd-4fce-92f6-17d1344892d7-secret-volume\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.463775 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfbcg\" (UniqueName: \"kubernetes.io/projected/dd08b35a-badd-4fce-92f6-17d1344892d7-kube-api-access-lfbcg\") pod \"collect-profiles-29482260-xxzbh\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.487892 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.573165 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:00:00 crc kubenswrapper[4995]: I0120 19:00:00.573247 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:00:01 crc kubenswrapper[4995]: I0120 19:00:01.060900 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh"] Jan 20 19:00:01 crc kubenswrapper[4995]: I0120 19:00:01.980222 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" event={"ID":"dd08b35a-badd-4fce-92f6-17d1344892d7","Type":"ContainerStarted","Data":"6b2813bc9ce2ec0d2b7e1286d7bd3b4a47744085b36f06f15908aceb284be850"} Jan 20 19:00:01 crc kubenswrapper[4995]: I0120 19:00:01.980592 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" event={"ID":"dd08b35a-badd-4fce-92f6-17d1344892d7","Type":"ContainerStarted","Data":"a0da5e23b9a766d413484d29b9e391eb48c213069843b800d92de1afeb5565e6"} Jan 20 19:00:01 crc kubenswrapper[4995]: I0120 19:00:01.998276 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" podStartSLOduration=1.998253612 podStartE2EDuration="1.998253612s" podCreationTimestamp="2026-01-20 19:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:00:01.995166928 +0000 UTC m=+8920.239771734" watchObservedRunningTime="2026-01-20 19:00:01.998253612 +0000 UTC m=+8920.242858418" Jan 20 19:00:03 crc kubenswrapper[4995]: I0120 19:00:03.036230 4995 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 19:00:03 crc kubenswrapper[4995]: I0120 19:00:03.036586 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 19:00:03 crc kubenswrapper[4995]: I0120 19:00:03.997393 4995 generic.go:334] "Generic (PLEG): container finished" podID="dd08b35a-badd-4fce-92f6-17d1344892d7" containerID="6b2813bc9ce2ec0d2b7e1286d7bd3b4a47744085b36f06f15908aceb284be850" exitCode=0 Jan 20 19:00:03 crc kubenswrapper[4995]: I0120 19:00:03.999188 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" event={"ID":"dd08b35a-badd-4fce-92f6-17d1344892d7","Type":"ContainerDied","Data":"6b2813bc9ce2ec0d2b7e1286d7bd3b4a47744085b36f06f15908aceb284be850"} Jan 20 19:00:04 crc kubenswrapper[4995]: I0120 19:00:04.107551 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qnltt" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="registry-server" probeResult="failure" output=< Jan 20 19:00:04 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 19:00:04 crc kubenswrapper[4995]: > Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.373628 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.548430 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd08b35a-badd-4fce-92f6-17d1344892d7-secret-volume\") pod \"dd08b35a-badd-4fce-92f6-17d1344892d7\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.548519 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd08b35a-badd-4fce-92f6-17d1344892d7-config-volume\") pod \"dd08b35a-badd-4fce-92f6-17d1344892d7\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.548631 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfbcg\" (UniqueName: \"kubernetes.io/projected/dd08b35a-badd-4fce-92f6-17d1344892d7-kube-api-access-lfbcg\") pod \"dd08b35a-badd-4fce-92f6-17d1344892d7\" (UID: \"dd08b35a-badd-4fce-92f6-17d1344892d7\") " Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.549288 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd08b35a-badd-4fce-92f6-17d1344892d7-config-volume" (OuterVolumeSpecName: "config-volume") pod "dd08b35a-badd-4fce-92f6-17d1344892d7" (UID: "dd08b35a-badd-4fce-92f6-17d1344892d7"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.549758 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/dd08b35a-badd-4fce-92f6-17d1344892d7-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.558817 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd08b35a-badd-4fce-92f6-17d1344892d7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "dd08b35a-badd-4fce-92f6-17d1344892d7" (UID: "dd08b35a-badd-4fce-92f6-17d1344892d7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.559032 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd08b35a-badd-4fce-92f6-17d1344892d7-kube-api-access-lfbcg" (OuterVolumeSpecName: "kube-api-access-lfbcg") pod "dd08b35a-badd-4fce-92f6-17d1344892d7" (UID: "dd08b35a-badd-4fce-92f6-17d1344892d7"). InnerVolumeSpecName "kube-api-access-lfbcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.652045 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfbcg\" (UniqueName: \"kubernetes.io/projected/dd08b35a-badd-4fce-92f6-17d1344892d7-kube-api-access-lfbcg\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:05 crc kubenswrapper[4995]: I0120 19:00:05.652131 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/dd08b35a-badd-4fce-92f6-17d1344892d7-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:06 crc kubenswrapper[4995]: I0120 19:00:06.018979 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" event={"ID":"dd08b35a-badd-4fce-92f6-17d1344892d7","Type":"ContainerDied","Data":"a0da5e23b9a766d413484d29b9e391eb48c213069843b800d92de1afeb5565e6"} Jan 20 19:00:06 crc kubenswrapper[4995]: I0120 19:00:06.019023 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a0da5e23b9a766d413484d29b9e391eb48c213069843b800d92de1afeb5565e6" Jan 20 19:00:06 crc kubenswrapper[4995]: I0120 19:00:06.019118 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482260-xxzbh" Jan 20 19:00:06 crc kubenswrapper[4995]: I0120 19:00:06.094615 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh"] Jan 20 19:00:06 crc kubenswrapper[4995]: I0120 19:00:06.104973 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482215-w96jh"] Jan 20 19:00:08 crc kubenswrapper[4995]: I0120 19:00:08.008277 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53063733-d6a3-485d-8fd7-954a15717d3b" path="/var/lib/kubelet/pods/53063733-d6a3-485d-8fd7-954a15717d3b/volumes" Jan 20 19:00:13 crc kubenswrapper[4995]: I0120 19:00:13.086940 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 19:00:13 crc kubenswrapper[4995]: I0120 19:00:13.145257 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 19:00:13 crc kubenswrapper[4995]: I0120 19:00:13.331028 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qnltt"] Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.098265 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qnltt" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="registry-server" containerID="cri-o://80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263" gracePeriod=2 Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.614375 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.767497 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-catalog-content\") pod \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.767721 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2lhd\" (UniqueName: \"kubernetes.io/projected/f6ce63e5-1071-4f2d-922c-bc0930cdf172-kube-api-access-v2lhd\") pod \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.767770 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-utilities\") pod \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\" (UID: \"f6ce63e5-1071-4f2d-922c-bc0930cdf172\") " Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.768491 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-utilities" (OuterVolumeSpecName: "utilities") pod "f6ce63e5-1071-4f2d-922c-bc0930cdf172" (UID: "f6ce63e5-1071-4f2d-922c-bc0930cdf172"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.775857 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6ce63e5-1071-4f2d-922c-bc0930cdf172-kube-api-access-v2lhd" (OuterVolumeSpecName: "kube-api-access-v2lhd") pod "f6ce63e5-1071-4f2d-922c-bc0930cdf172" (UID: "f6ce63e5-1071-4f2d-922c-bc0930cdf172"). InnerVolumeSpecName "kube-api-access-v2lhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.870769 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2lhd\" (UniqueName: \"kubernetes.io/projected/f6ce63e5-1071-4f2d-922c-bc0930cdf172-kube-api-access-v2lhd\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.870810 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.884281 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f6ce63e5-1071-4f2d-922c-bc0930cdf172" (UID: "f6ce63e5-1071-4f2d-922c-bc0930cdf172"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:00:15 crc kubenswrapper[4995]: I0120 19:00:15.973270 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f6ce63e5-1071-4f2d-922c-bc0930cdf172-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.111676 4995 generic.go:334] "Generic (PLEG): container finished" podID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerID="80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263" exitCode=0 Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.111726 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnltt" event={"ID":"f6ce63e5-1071-4f2d-922c-bc0930cdf172","Type":"ContainerDied","Data":"80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263"} Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.111746 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qnltt" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.111785 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qnltt" event={"ID":"f6ce63e5-1071-4f2d-922c-bc0930cdf172","Type":"ContainerDied","Data":"5fca4a781d5c49c4289da10bd71dc83cf8800a5902a2e053d1f60b6a0702240d"} Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.111809 4995 scope.go:117] "RemoveContainer" containerID="80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.133743 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qnltt"] Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.144348 4995 scope.go:117] "RemoveContainer" containerID="b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.144850 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qnltt"] Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.176487 4995 scope.go:117] "RemoveContainer" containerID="15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.218147 4995 scope.go:117] "RemoveContainer" containerID="80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263" Jan 20 19:00:16 crc kubenswrapper[4995]: E0120 19:00:16.218577 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263\": container with ID starting with 80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263 not found: ID does not exist" containerID="80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.218620 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263"} err="failed to get container status \"80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263\": rpc error: code = NotFound desc = could not find container \"80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263\": container with ID starting with 80b5aa9a871db678f00d1d7fb43c583ee1a9711b219204ee0e86f26a70f03263 not found: ID does not exist" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.218646 4995 scope.go:117] "RemoveContainer" containerID="b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730" Jan 20 19:00:16 crc kubenswrapper[4995]: E0120 19:00:16.219002 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730\": container with ID starting with b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730 not found: ID does not exist" containerID="b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.219036 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730"} err="failed to get container status \"b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730\": rpc error: code = NotFound desc = could not find container 
\"b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730\": container with ID starting with b31e9fab6487338852eefb329937600503d9bedd0af95bc5614d1c66c0870730 not found: ID does not exist" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.219056 4995 scope.go:117] "RemoveContainer" containerID="15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2" Jan 20 19:00:16 crc kubenswrapper[4995]: E0120 19:00:16.219378 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2\": container with ID starting with 15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2 not found: ID does not exist" containerID="15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2" Jan 20 19:00:16 crc kubenswrapper[4995]: I0120 19:00:16.219430 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2"} err="failed to get container status \"15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2\": rpc error: code = NotFound desc = could not find container \"15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2\": container with ID starting with 15bbd87f30f452f6bb22b149122deac7161ea6fcfcc4790834d321584b7d20d2 not found: ID does not exist" Jan 20 19:00:18 crc kubenswrapper[4995]: I0120 19:00:18.003051 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" path="/var/lib/kubelet/pods/f6ce63e5-1071-4f2d-922c-bc0930cdf172/volumes" Jan 20 19:00:30 crc kubenswrapper[4995]: I0120 19:00:30.571793 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:00:30 crc kubenswrapper[4995]: I0120 19:00:30.572403 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:00:37 crc kubenswrapper[4995]: I0120 19:00:37.653155 4995 scope.go:117] "RemoveContainer" containerID="77dc5cb54bb5c57d09ff0c9efcc9e84a45b00214f1b2f0bff3d02613d50dc874" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.356795 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-65gpw"] Jan 20 19:00:40 crc kubenswrapper[4995]: E0120 19:00:40.357821 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="extract-content" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.357839 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="extract-content" Jan 20 19:00:40 crc kubenswrapper[4995]: E0120 19:00:40.357877 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="extract-utilities" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.357886 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" 
containerName="extract-utilities" Jan 20 19:00:40 crc kubenswrapper[4995]: E0120 19:00:40.357915 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="registry-server" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.357922 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="registry-server" Jan 20 19:00:40 crc kubenswrapper[4995]: E0120 19:00:40.357950 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd08b35a-badd-4fce-92f6-17d1344892d7" containerName="collect-profiles" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.357958 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd08b35a-badd-4fce-92f6-17d1344892d7" containerName="collect-profiles" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.358394 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd08b35a-badd-4fce-92f6-17d1344892d7" containerName="collect-profiles" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.358419 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6ce63e5-1071-4f2d-922c-bc0930cdf172" containerName="registry-server" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.367462 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.371536 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-65gpw"] Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.464516 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-catalog-content\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.464752 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mpf7\" (UniqueName: \"kubernetes.io/projected/7de75d4e-21e6-49e0-a20c-58cb198424fc-kube-api-access-9mpf7\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.464959 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-utilities\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.566721 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-catalog-content\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.566860 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mpf7\" (UniqueName: \"kubernetes.io/projected/7de75d4e-21e6-49e0-a20c-58cb198424fc-kube-api-access-9mpf7\") pod \"redhat-marketplace-65gpw\" (UID: 
\"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.566913 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-utilities\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.567178 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-catalog-content\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.567471 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-utilities\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.590494 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mpf7\" (UniqueName: \"kubernetes.io/projected/7de75d4e-21e6-49e0-a20c-58cb198424fc-kube-api-access-9mpf7\") pod \"redhat-marketplace-65gpw\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") " pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:40 crc kubenswrapper[4995]: I0120 19:00:40.710427 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:41 crc kubenswrapper[4995]: I0120 19:00:41.267527 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-65gpw"] Jan 20 19:00:41 crc kubenswrapper[4995]: I0120 19:00:41.404774 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65gpw" event={"ID":"7de75d4e-21e6-49e0-a20c-58cb198424fc","Type":"ContainerStarted","Data":"6c3176f5d785089eb87e233452b3ebc18920706fdf8cc54f1ed14ee407280697"} Jan 20 19:00:42 crc kubenswrapper[4995]: I0120 19:00:42.416191 4995 generic.go:334] "Generic (PLEG): container finished" podID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerID="e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e" exitCode=0 Jan 20 19:00:42 crc kubenswrapper[4995]: I0120 19:00:42.417464 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65gpw" event={"ID":"7de75d4e-21e6-49e0-a20c-58cb198424fc","Type":"ContainerDied","Data":"e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e"} Jan 20 19:00:42 crc kubenswrapper[4995]: I0120 19:00:42.419207 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 19:00:44 crc kubenswrapper[4995]: I0120 19:00:44.436016 4995 generic.go:334] "Generic (PLEG): container finished" podID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerID="1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b" exitCode=0 Jan 20 19:00:44 crc kubenswrapper[4995]: I0120 19:00:44.436649 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65gpw" 
event={"ID":"7de75d4e-21e6-49e0-a20c-58cb198424fc","Type":"ContainerDied","Data":"1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b"} Jan 20 19:00:45 crc kubenswrapper[4995]: I0120 19:00:45.445956 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65gpw" event={"ID":"7de75d4e-21e6-49e0-a20c-58cb198424fc","Type":"ContainerStarted","Data":"b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe"} Jan 20 19:00:45 crc kubenswrapper[4995]: I0120 19:00:45.467341 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-65gpw" podStartSLOduration=3.02712968 podStartE2EDuration="5.467320216s" podCreationTimestamp="2026-01-20 19:00:40 +0000 UTC" firstStartedPulling="2026-01-20 19:00:42.418692059 +0000 UTC m=+8960.663296875" lastFinishedPulling="2026-01-20 19:00:44.858882605 +0000 UTC m=+8963.103487411" observedRunningTime="2026-01-20 19:00:45.462346073 +0000 UTC m=+8963.706950879" watchObservedRunningTime="2026-01-20 19:00:45.467320216 +0000 UTC m=+8963.711925022" Jan 20 19:00:50 crc kubenswrapper[4995]: I0120 19:00:50.711585 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:50 crc kubenswrapper[4995]: I0120 19:00:50.712236 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:50 crc kubenswrapper[4995]: I0120 19:00:50.763772 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:51 crc kubenswrapper[4995]: I0120 19:00:51.576195 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-65gpw" Jan 20 19:00:51 crc kubenswrapper[4995]: I0120 19:00:51.642995 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-65gpw"] Jan 20 19:00:53 crc kubenswrapper[4995]: I0120 19:00:53.537343 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-65gpw" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="registry-server" containerID="cri-o://b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe" gracePeriod=2 Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.074635 4995 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.194804 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-utilities\") pod \"7de75d4e-21e6-49e0-a20c-58cb198424fc\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") "
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.194930 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mpf7\" (UniqueName: \"kubernetes.io/projected/7de75d4e-21e6-49e0-a20c-58cb198424fc-kube-api-access-9mpf7\") pod \"7de75d4e-21e6-49e0-a20c-58cb198424fc\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") "
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.195131 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-catalog-content\") pod \"7de75d4e-21e6-49e0-a20c-58cb198424fc\" (UID: \"7de75d4e-21e6-49e0-a20c-58cb198424fc\") "
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.196581 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-utilities" (OuterVolumeSpecName: "utilities") pod "7de75d4e-21e6-49e0-a20c-58cb198424fc" (UID: "7de75d4e-21e6-49e0-a20c-58cb198424fc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.201317 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7de75d4e-21e6-49e0-a20c-58cb198424fc-kube-api-access-9mpf7" (OuterVolumeSpecName: "kube-api-access-9mpf7") pod "7de75d4e-21e6-49e0-a20c-58cb198424fc" (UID: "7de75d4e-21e6-49e0-a20c-58cb198424fc"). InnerVolumeSpecName "kube-api-access-9mpf7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.223865 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7de75d4e-21e6-49e0-a20c-58cb198424fc" (UID: "7de75d4e-21e6-49e0-a20c-58cb198424fc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.296932 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.296966 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7de75d4e-21e6-49e0-a20c-58cb198424fc-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.296976 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mpf7\" (UniqueName: \"kubernetes.io/projected/7de75d4e-21e6-49e0-a20c-58cb198424fc-kube-api-access-9mpf7\") on node \"crc\" DevicePath \"\"" Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.571648 4995 generic.go:334] "Generic (PLEG): container finished" podID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerID="b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe" exitCode=0 Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.573670 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65gpw" event={"ID":"7de75d4e-21e6-49e0-a20c-58cb198424fc","Type":"ContainerDied","Data":"b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe"} Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.573778 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65gpw" event={"ID":"7de75d4e-21e6-49e0-a20c-58cb198424fc","Type":"ContainerDied","Data":"6c3176f5d785089eb87e233452b3ebc18920706fdf8cc54f1ed14ee407280697"} Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.573854 4995 scope.go:117] "RemoveContainer" containerID="b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe" Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.574153 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65gpw"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.609146 4995 scope.go:117] "RemoveContainer" containerID="1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.614800 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-65gpw"]
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.659548 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-65gpw"]
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.659693 4995 scope.go:117] "RemoveContainer" containerID="e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.683318 4995 scope.go:117] "RemoveContainer" containerID="b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe"
Jan 20 19:00:54 crc kubenswrapper[4995]: E0120 19:00:54.683945 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe\": container with ID starting with b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe not found: ID does not exist" containerID="b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.684073 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe"} err="failed to get container status \"b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe\": rpc error: code = NotFound desc = could not find container \"b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe\": container with ID starting with b35a378a49364a2259cce160358b21cb39a3133068a53c13d10adc9ea3b9c9fe not found: ID does not exist"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.684207 4995 scope.go:117] "RemoveContainer" containerID="1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b"
Jan 20 19:00:54 crc kubenswrapper[4995]: E0120 19:00:54.684610 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b\": container with ID starting with 1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b not found: ID does not exist" containerID="1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.684708 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b"} err="failed to get container status \"1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b\": rpc error: code = NotFound desc = could not find container \"1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b\": container with ID starting with 1b144fdd340effca50068c8512d28d9b10d18f184890bec2c78857283ff8803b not found: ID does not exist"
Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.684786 4995 scope.go:117] "RemoveContainer" containerID="e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e"
Jan 20 19:00:54 crc kubenswrapper[4995]: E0120 19:00:54.685055 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e\": container with ID starting with e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e not found: ID does not exist" containerID="e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e"
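Each "RemoveContainer" above is answered by a NotFound from the runtime, and the kubelet logs the DeleteContainer error but moves on: a container that no longer exists needs no further deletion. A small Go sketch of that idempotent-delete pattern against a gRPC-style error, assuming only the NotFound classification visible in the log; removeContainer here is a hypothetical stand-in for the CRI call, not kubelet source:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a hypothetical stand-in for a CRI RemoveContainer call;
// it always answers the way CRI-O does above once the container is gone.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// deleteIdempotent treats "already gone" the same as "successfully removed",
// which is why the deletions above terminate instead of retrying forever.
func deleteIdempotent(id string) error {
	if err := removeContainer(id); err != nil && status.Code(err) != codes.NotFound {
		return fmt.Errorf("remove %s: %w", id, err)
	}
	return nil
}

func main() {
	// Prints <nil>: the NotFound answer is absorbed as success.
	fmt.Println(deleteIdempotent("e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e"))
}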
failed" err="rpc error: code = NotFound desc = could not find container \"e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e\": container with ID starting with e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e not found: ID does not exist" containerID="e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e" Jan 20 19:00:54 crc kubenswrapper[4995]: I0120 19:00:54.685192 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e"} err="failed to get container status \"e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e\": rpc error: code = NotFound desc = could not find container \"e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e\": container with ID starting with e546b0aac5f8c4617d3c5e9b8e095f238a63abaecb8635805d45bc4a4e2d850e not found: ID does not exist" Jan 20 19:00:56 crc kubenswrapper[4995]: I0120 19:00:56.002644 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" path="/var/lib/kubelet/pods/7de75d4e-21e6-49e0-a20c-58cb198424fc/volumes" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.189620 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29482261-xgf8t"] Jan 20 19:01:00 crc kubenswrapper[4995]: E0120 19:01:00.192414 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="registry-server" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.192521 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="registry-server" Jan 20 19:01:00 crc kubenswrapper[4995]: E0120 19:01:00.192638 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="extract-content" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.192710 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="extract-content" Jan 20 19:01:00 crc kubenswrapper[4995]: E0120 19:01:00.192837 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="extract-utilities" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.192911 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="extract-utilities" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.193501 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="7de75d4e-21e6-49e0-a20c-58cb198424fc" containerName="registry-server" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.194747 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.210178 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482261-xgf8t"] Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.336448 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-config-data\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.336509 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-combined-ca-bundle\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.336546 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nl99s\" (UniqueName: \"kubernetes.io/projected/fe204a2b-9cad-4100-913f-3e8bb41751db-kube-api-access-nl99s\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.336623 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-fernet-keys\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.440543 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-fernet-keys\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.440682 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-config-data\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.440748 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-combined-ca-bundle\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.440786 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nl99s\" (UniqueName: \"kubernetes.io/projected/fe204a2b-9cad-4100-913f-3e8bb41751db-kube-api-access-nl99s\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.447875 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-combined-ca-bundle\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.448530 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-fernet-keys\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.454053 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-config-data\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.464224 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nl99s\" (UniqueName: \"kubernetes.io/projected/fe204a2b-9cad-4100-913f-3e8bb41751db-kube-api-access-nl99s\") pod \"keystone-cron-29482261-xgf8t\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.518993 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.571311 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.571376 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.571424 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.572314 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5c046335aa3aae890b29e903d9fa31b1230c10272583ff8b82e95dc683ff2a9"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 19:01:00 crc kubenswrapper[4995]: I0120 19:01:00.572379 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://d5c046335aa3aae890b29e903d9fa31b1230c10272583ff8b82e95dc683ff2a9" gracePeriod=600 Jan 20 19:01:01 crc kubenswrapper[4995]: W0120 19:01:01.007191 4995 manager.go:1169] Failed to process watch event {EventType:0 
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.016957 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482261-xgf8t"]
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.646354 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482261-xgf8t" event={"ID":"fe204a2b-9cad-4100-913f-3e8bb41751db","Type":"ContainerStarted","Data":"38cd690e0c6515e08020424ed649fbe2234a3bb3fcb8f62a63c9b7c87a13add8"}
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.646748 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482261-xgf8t" event={"ID":"fe204a2b-9cad-4100-913f-3e8bb41751db","Type":"ContainerStarted","Data":"5bcd87b4852018d8bdb39d2c72b802b8b9ed74596eb97fe973788b81ca7f09e6"}
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.649447 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="d5c046335aa3aae890b29e903d9fa31b1230c10272583ff8b82e95dc683ff2a9" exitCode=0
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.649492 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"d5c046335aa3aae890b29e903d9fa31b1230c10272583ff8b82e95dc683ff2a9"}
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.649531 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"}
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.649549 4995 scope.go:117] "RemoveContainer" containerID="08e31de61523b052aa41400d9e0a22df9bf7ea12df4a8ea8c0c12f7c51de1aa5"
Jan 20 19:01:01 crc kubenswrapper[4995]: I0120 19:01:01.671705 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29482261-xgf8t" podStartSLOduration=1.6716842079999998 podStartE2EDuration="1.671684208s" podCreationTimestamp="2026-01-20 19:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:01:01.664322191 +0000 UTC m=+8979.908927027" watchObservedRunningTime="2026-01-20 19:01:01.671684208 +0000 UTC m=+8979.916289014"
Jan 20 19:01:05 crc kubenswrapper[4995]: I0120 19:01:05.699853 4995 generic.go:334] "Generic (PLEG): container finished" podID="fe204a2b-9cad-4100-913f-3e8bb41751db" containerID="38cd690e0c6515e08020424ed649fbe2234a3bb3fcb8f62a63c9b7c87a13add8" exitCode=0
Jan 20 19:01:05 crc kubenswrapper[4995]: I0120 19:01:05.699928 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482261-xgf8t" event={"ID":"fe204a2b-9cad-4100-913f-3e8bb41751db","Type":"ContainerDied","Data":"38cd690e0c6515e08020424ed649fbe2234a3bb3fcb8f62a63c9b7c87a13add8"}
Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.123377 4995 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.247048 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-combined-ca-bundle\") pod \"fe204a2b-9cad-4100-913f-3e8bb41751db\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.248200 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-fernet-keys\") pod \"fe204a2b-9cad-4100-913f-3e8bb41751db\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.248270 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nl99s\" (UniqueName: \"kubernetes.io/projected/fe204a2b-9cad-4100-913f-3e8bb41751db-kube-api-access-nl99s\") pod \"fe204a2b-9cad-4100-913f-3e8bb41751db\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.248467 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-config-data\") pod \"fe204a2b-9cad-4100-913f-3e8bb41751db\" (UID: \"fe204a2b-9cad-4100-913f-3e8bb41751db\") " Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.256447 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fe204a2b-9cad-4100-913f-3e8bb41751db" (UID: "fe204a2b-9cad-4100-913f-3e8bb41751db"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.256535 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe204a2b-9cad-4100-913f-3e8bb41751db-kube-api-access-nl99s" (OuterVolumeSpecName: "kube-api-access-nl99s") pod "fe204a2b-9cad-4100-913f-3e8bb41751db" (UID: "fe204a2b-9cad-4100-913f-3e8bb41751db"). InnerVolumeSpecName "kube-api-access-nl99s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.284939 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe204a2b-9cad-4100-913f-3e8bb41751db" (UID: "fe204a2b-9cad-4100-913f-3e8bb41751db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.325461 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-config-data" (OuterVolumeSpecName: "config-data") pod "fe204a2b-9cad-4100-913f-3e8bb41751db" (UID: "fe204a2b-9cad-4100-913f-3e8bb41751db"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.351776 4995 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.351805 4995 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.351819 4995 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fe204a2b-9cad-4100-913f-3e8bb41751db-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.351835 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nl99s\" (UniqueName: \"kubernetes.io/projected/fe204a2b-9cad-4100-913f-3e8bb41751db-kube-api-access-nl99s\") on node \"crc\" DevicePath \"\"" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.718788 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482261-xgf8t" event={"ID":"fe204a2b-9cad-4100-913f-3e8bb41751db","Type":"ContainerDied","Data":"5bcd87b4852018d8bdb39d2c72b802b8b9ed74596eb97fe973788b81ca7f09e6"} Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.719099 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bcd87b4852018d8bdb39d2c72b802b8b9ed74596eb97fe973788b81ca7f09e6" Jan 20 19:01:07 crc kubenswrapper[4995]: I0120 19:01:07.718832 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482261-xgf8t" Jan 20 19:03:00 crc kubenswrapper[4995]: I0120 19:03:00.571448 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:03:00 crc kubenswrapper[4995]: I0120 19:03:00.572178 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:03:30 crc kubenswrapper[4995]: I0120 19:03:30.571506 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:03:30 crc kubenswrapper[4995]: I0120 19:03:30.572054 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:04:00 crc kubenswrapper[4995]: I0120 19:04:00.572899 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:04:00 crc kubenswrapper[4995]: I0120 19:04:00.573629 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:04:00 crc kubenswrapper[4995]: I0120 19:04:00.573688 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 19:04:00 crc kubenswrapper[4995]: I0120 19:04:00.574611 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 19:04:00 crc kubenswrapper[4995]: I0120 19:04:00.574674 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" gracePeriod=600 Jan 20 19:04:00 crc kubenswrapper[4995]: E0120 19:04:00.699430 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:04:01 crc kubenswrapper[4995]: I0120 19:04:01.613742 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" exitCode=0 Jan 20 19:04:01 crc kubenswrapper[4995]: I0120 19:04:01.613816 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"} Jan 20 19:04:01 crc kubenswrapper[4995]: I0120 19:04:01.614215 4995 scope.go:117] "RemoveContainer" containerID="d5c046335aa3aae890b29e903d9fa31b1230c10272583ff8b82e95dc683ff2a9" Jan 20 19:04:01 crc kubenswrapper[4995]: I0120 19:04:01.614858 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:04:01 crc kubenswrapper[4995]: E0120 19:04:01.615118 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:04:13 crc kubenswrapper[4995]: I0120 
19:04:13.990047 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:04:13 crc kubenswrapper[4995]: E0120 19:04:13.991321 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:04:28 crc kubenswrapper[4995]: I0120 19:04:28.991476 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:04:28 crc kubenswrapper[4995]: E0120 19:04:28.992544 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:04:40 crc kubenswrapper[4995]: I0120 19:04:40.991589 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:04:40 crc kubenswrapper[4995]: E0120 19:04:40.992613 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:04:53 crc kubenswrapper[4995]: I0120 19:04:53.990468 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:04:53 crc kubenswrapper[4995]: E0120 19:04:53.991670 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:05:04 crc kubenswrapper[4995]: I0120 19:05:04.990346 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:05:04 crc kubenswrapper[4995]: E0120 19:05:04.991402 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:05:15 crc kubenswrapper[4995]: I0120 19:05:15.990048 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:05:15 crc kubenswrapper[4995]: E0120 19:05:15.991821 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
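From here the pattern is steady: the sync loop picks the pod up again every 10-15 seconds, and each attempt is refused with the same "back-off 5m0s" message because the container's restart backoff has reached its ceiling. A Go sketch of the usual kubelet schedule, assuming the documented defaults of a 10s initial delay doubling to a 5m cap; the function is illustrative, not kubelet source:

package main

import (
	"fmt"
	"time"
)

// restartBackoff doubles from 10s per consecutive crash and caps at 5m,
// which is why every retry above reports "back-off 5m0s".
func restartBackoff(consecutiveCrashes int) time.Duration {
	d := 10 * time.Second
	for i := 1; i < consecutiveCrashes; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash %d -> wait %v\n", n, restartBackoff(n))
	}
	// 10s, 20s, 40s, 1m20s, 2m40s, then 5m0s from the sixth crash onward.
}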
Jan 20 19:05:30 crc kubenswrapper[4995]: I0120 19:05:30.990696 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:05:30 crc kubenswrapper[4995]: E0120 19:05:30.992891 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:05:42 crc kubenswrapper[4995]: I0120 19:05:42.991529 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:05:42 crc kubenswrapper[4995]: E0120 19:05:42.992330 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:05:57 crc kubenswrapper[4995]: I0120 19:05:57.992294 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:05:57 crc kubenswrapper[4995]: E0120 19:05:57.995093 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:06:08 crc kubenswrapper[4995]: I0120 19:06:08.990026 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:06:08 crc kubenswrapper[4995]: E0120 19:06:08.991334 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:06:22 crc kubenswrapper[4995]: I0120 19:06:22.990441 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:06:22 crc kubenswrapper[4995]: E0120 19:06:22.991354 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:06:36 crc kubenswrapper[4995]: I0120 19:06:36.989905 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:06:36 crc kubenswrapper[4995]: E0120 19:06:36.990973 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:06:48 crc kubenswrapper[4995]: I0120 19:06:48.022378 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:06:48 crc kubenswrapper[4995]: E0120 19:06:48.023442 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.200782 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rzhq4"] Jan 20 19:06:49 crc kubenswrapper[4995]: E0120 19:06:49.201808 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe204a2b-9cad-4100-913f-3e8bb41751db" containerName="keystone-cron" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.201830 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe204a2b-9cad-4100-913f-3e8bb41751db" containerName="keystone-cron" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.202184 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe204a2b-9cad-4100-913f-3e8bb41751db" containerName="keystone-cron" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.206531 4995 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.213549 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rzhq4"] Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.322962 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmlnh\" (UniqueName: \"kubernetes.io/projected/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-kube-api-access-tmlnh\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.323032 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-catalog-content\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.323055 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-utilities\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.424516 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmlnh\" (UniqueName: \"kubernetes.io/projected/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-kube-api-access-tmlnh\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.424571 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-catalog-content\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.424587 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-utilities\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.425062 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-utilities\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.425510 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-catalog-content\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.733814 4995 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tmlnh\" (UniqueName: \"kubernetes.io/projected/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-kube-api-access-tmlnh\") pod \"certified-operators-rzhq4\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:49 crc kubenswrapper[4995]: I0120 19:06:49.833540 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:50 crc kubenswrapper[4995]: I0120 19:06:50.384308 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rzhq4"] Jan 20 19:06:50 crc kubenswrapper[4995]: I0120 19:06:50.567480 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzhq4" event={"ID":"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3","Type":"ContainerStarted","Data":"d258c2228e55a87c75c27b7fdbd6e7b2202ef7e3b0b09aa4d37858eab74ef763"} Jan 20 19:06:50 crc kubenswrapper[4995]: I0120 19:06:50.984206 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c4dmz"] Jan 20 19:06:50 crc kubenswrapper[4995]: I0120 19:06:50.987943 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.039998 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c4dmz"] Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.166524 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwnd9\" (UniqueName: \"kubernetes.io/projected/81050223-a913-4bca-847c-ecd842dee920-kube-api-access-jwnd9\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.166596 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-catalog-content\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.166647 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-utilities\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.268961 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwnd9\" (UniqueName: \"kubernetes.io/projected/81050223-a913-4bca-847c-ecd842dee920-kube-api-access-jwnd9\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.269035 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-catalog-content\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " 
pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.269106 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-utilities\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.269505 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-catalog-content\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.269728 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-utilities\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.287747 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwnd9\" (UniqueName: \"kubernetes.io/projected/81050223-a913-4bca-847c-ecd842dee920-kube-api-access-jwnd9\") pod \"community-operators-c4dmz\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.323834 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.589171 4995 generic.go:334] "Generic (PLEG): container finished" podID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerID="defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677" exitCode=0 Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.589398 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzhq4" event={"ID":"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3","Type":"ContainerDied","Data":"defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677"} Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.591585 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 19:06:51 crc kubenswrapper[4995]: I0120 19:06:51.946113 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c4dmz"] Jan 20 19:06:51 crc kubenswrapper[4995]: W0120 19:06:51.953639 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81050223_a913_4bca_847c_ecd842dee920.slice/crio-091112da8c16e9204b6b3a351e4dd440b8d1f22ca04aef1c8649dcba1b587d89 WatchSource:0}: Error finding container 091112da8c16e9204b6b3a351e4dd440b8d1f22ca04aef1c8649dcba1b587d89: Status 404 returned error can't find the container with id 091112da8c16e9204b6b3a351e4dd440b8d1f22ca04aef1c8649dcba1b587d89 Jan 20 19:06:52 crc kubenswrapper[4995]: I0120 19:06:52.603588 4995 generic.go:334] "Generic (PLEG): container finished" podID="81050223-a913-4bca-847c-ecd842dee920" containerID="28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6" exitCode=0 Jan 20 19:06:52 crc kubenswrapper[4995]: I0120 
19:06:52.604103 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerDied","Data":"28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6"}
Jan 20 19:06:52 crc kubenswrapper[4995]: I0120 19:06:52.604148 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerStarted","Data":"091112da8c16e9204b6b3a351e4dd440b8d1f22ca04aef1c8649dcba1b587d89"}
Jan 20 19:06:53 crc kubenswrapper[4995]: I0120 19:06:53.614804 4995 generic.go:334] "Generic (PLEG): container finished" podID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerID="cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758" exitCode=0
Jan 20 19:06:53 crc kubenswrapper[4995]: I0120 19:06:53.615106 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzhq4" event={"ID":"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3","Type":"ContainerDied","Data":"cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758"}
Jan 20 19:06:53 crc kubenswrapper[4995]: I0120 19:06:53.619174 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerStarted","Data":"03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627"}
Jan 20 19:06:53 crc kubenswrapper[4995]: E0120 19:06:53.996035 4995 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81050223_a913_4bca_847c_ecd842dee920.slice/crio-03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627.scope\": RecentStats: unable to find data in memory cache]"
Jan 20 19:06:54 crc kubenswrapper[4995]: I0120 19:06:54.634983 4995 generic.go:334] "Generic (PLEG): container finished" podID="81050223-a913-4bca-847c-ecd842dee920" containerID="03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627" exitCode=0
Jan 20 19:06:54 crc kubenswrapper[4995]: I0120 19:06:54.635425 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerDied","Data":"03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627"}
Jan 20 19:06:54 crc kubenswrapper[4995]: I0120 19:06:54.654193 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzhq4" event={"ID":"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3","Type":"ContainerStarted","Data":"b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750"}
Jan 20 19:06:54 crc kubenswrapper[4995]: I0120 19:06:54.684464 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rzhq4" podStartSLOduration=3.103848901 podStartE2EDuration="5.684445368s" podCreationTimestamp="2026-01-20 19:06:49 +0000 UTC" firstStartedPulling="2026-01-20 19:06:51.591363241 +0000 UTC m=+9329.835968047" lastFinishedPulling="2026-01-20 19:06:54.171959668 +0000 UTC m=+9332.416564514" observedRunningTime="2026-01-20 19:06:54.681179919 +0000 UTC m=+9332.925784745" watchObservedRunningTime="2026-01-20 19:06:54.684445368 +0000 UTC m=+9332.929050174"
Jan 20 19:06:55 crc kubenswrapper[4995]: I0120 19:06:55.662728 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerStarted","Data":"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff"}
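With certified-operators-rzhq4 and community-operators-c4dmz interleaving from here on, extracting just the PLEG events makes the stream easier to follow. An ad-hoc Go filter fitted to the quoting of the "SyncLoop (PLEG)" lines above; the regular expression and output format are assumptions for this log only, nothing standard:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches lines like:
//   ... kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="ns/name" event={"ID":"...","Type":"ContainerDied","Data":"..."}
var plegRe = regexp.MustCompile(
	`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

// short truncates IDs for display without panicking on short input.
func short(s string, n int) string {
	if len(s) <= n {
		return s
	}
	return s[:n]
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // log lines can be long
	for sc.Scan() {
		if m := plegRe.FindStringSubmatch(sc.Text()); m != nil {
			pod, uid, typ, data := m[1], m[2], m[3], m[4]
			fmt.Printf("%-50s %-16s %s (uid %s)\n", pod, typ, short(data, 12), short(uid, 8))
		}
	}
}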
event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerStarted","Data":"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff"} Jan 20 19:06:55 crc kubenswrapper[4995]: I0120 19:06:55.681401 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c4dmz" podStartSLOduration=3.251814177 podStartE2EDuration="5.681384437s" podCreationTimestamp="2026-01-20 19:06:50 +0000 UTC" firstStartedPulling="2026-01-20 19:06:52.605535542 +0000 UTC m=+9330.850140338" lastFinishedPulling="2026-01-20 19:06:55.035105792 +0000 UTC m=+9333.279710598" observedRunningTime="2026-01-20 19:06:55.677380469 +0000 UTC m=+9333.921985275" watchObservedRunningTime="2026-01-20 19:06:55.681384437 +0000 UTC m=+9333.925989253" Jan 20 19:06:59 crc kubenswrapper[4995]: I0120 19:06:59.834223 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:59 crc kubenswrapper[4995]: I0120 19:06:59.834649 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:06:59 crc kubenswrapper[4995]: I0120 19:06:59.891039 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:07:00 crc kubenswrapper[4995]: I0120 19:07:00.771225 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:07:00 crc kubenswrapper[4995]: I0120 19:07:00.816836 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rzhq4"] Jan 20 19:07:01 crc kubenswrapper[4995]: I0120 19:07:01.324773 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:07:01 crc kubenswrapper[4995]: I0120 19:07:01.324946 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:07:01 crc kubenswrapper[4995]: I0120 19:07:01.792463 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:07:02 crc kubenswrapper[4995]: I0120 19:07:02.771961 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rzhq4" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="registry-server" containerID="cri-o://b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750" gracePeriod=2 Jan 20 19:07:02 crc kubenswrapper[4995]: I0120 19:07:02.855622 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:07:02 crc kubenswrapper[4995]: I0120 19:07:02.990742 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:07:02 crc kubenswrapper[4995]: E0120 19:07:02.991245 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" 
podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.248700 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.436372 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-catalog-content\") pod \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.436562 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmlnh\" (UniqueName: \"kubernetes.io/projected/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-kube-api-access-tmlnh\") pod \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.436624 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-utilities\") pod \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\" (UID: \"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3\") " Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.440111 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-utilities" (OuterVolumeSpecName: "utilities") pod "16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" (UID: "16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.446144 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-kube-api-access-tmlnh" (OuterVolumeSpecName: "kube-api-access-tmlnh") pod "16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" (UID: "16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3"). InnerVolumeSpecName "kube-api-access-tmlnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.495575 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" (UID: "16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.540417 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.540465 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmlnh\" (UniqueName: \"kubernetes.io/projected/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-kube-api-access-tmlnh\") on node \"crc\" DevicePath \"\"" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.540487 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.726098 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c4dmz"] Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.791968 4995 generic.go:334] "Generic (PLEG): container finished" podID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerID="b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750" exitCode=0 Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.792091 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rzhq4" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.792050 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzhq4" event={"ID":"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3","Type":"ContainerDied","Data":"b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750"} Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.792236 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzhq4" event={"ID":"16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3","Type":"ContainerDied","Data":"d258c2228e55a87c75c27b7fdbd6e7b2202ef7e3b0b09aa4d37858eab74ef763"} Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.792262 4995 scope.go:117] "RemoveContainer" containerID="b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.837474 4995 scope.go:117] "RemoveContainer" containerID="cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.838818 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rzhq4"] Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.856700 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rzhq4"] Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.872641 4995 scope.go:117] "RemoveContainer" containerID="defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.930189 4995 scope.go:117] "RemoveContainer" containerID="b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750" Jan 20 19:07:03 crc kubenswrapper[4995]: E0120 19:07:03.930626 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750\": container with ID starting with b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750 
not found: ID does not exist" containerID="b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.930663 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750"} err="failed to get container status \"b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750\": rpc error: code = NotFound desc = could not find container \"b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750\": container with ID starting with b5b932e9f5d06e61b76a4f28553d76083fd02b2a903a2ec0856f26287255d750 not found: ID does not exist" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.930687 4995 scope.go:117] "RemoveContainer" containerID="cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758" Jan 20 19:07:03 crc kubenswrapper[4995]: E0120 19:07:03.931007 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758\": container with ID starting with cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758 not found: ID does not exist" containerID="cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.931153 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758"} err="failed to get container status \"cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758\": rpc error: code = NotFound desc = could not find container \"cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758\": container with ID starting with cacc76ef3f34077519ca50f7a7fd4ceed17847125e2bcca2ba173f1e160a7758 not found: ID does not exist" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.931257 4995 scope.go:117] "RemoveContainer" containerID="defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677" Jan 20 19:07:03 crc kubenswrapper[4995]: E0120 19:07:03.931701 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677\": container with ID starting with defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677 not found: ID does not exist" containerID="defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677" Jan 20 19:07:03 crc kubenswrapper[4995]: I0120 19:07:03.931738 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677"} err="failed to get container status \"defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677\": rpc error: code = NotFound desc = could not find container \"defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677\": container with ID starting with defb20f6f5fa708684a514360de14bfb0ce42d430984506092230b6136914677 not found: ID does not exist" Jan 20 19:07:04 crc kubenswrapper[4995]: I0120 19:07:04.001380 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" path="/var/lib/kubelet/pods/16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3/volumes" Jan 20 19:07:04 crc kubenswrapper[4995]: I0120 19:07:04.802210 4995 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-marketplace/community-operators-c4dmz" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="registry-server" containerID="cri-o://330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff" gracePeriod=2 Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.287109 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.483601 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-utilities\") pod \"81050223-a913-4bca-847c-ecd842dee920\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.483678 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-catalog-content\") pod \"81050223-a913-4bca-847c-ecd842dee920\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.483760 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwnd9\" (UniqueName: \"kubernetes.io/projected/81050223-a913-4bca-847c-ecd842dee920-kube-api-access-jwnd9\") pod \"81050223-a913-4bca-847c-ecd842dee920\" (UID: \"81050223-a913-4bca-847c-ecd842dee920\") " Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.484464 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-utilities" (OuterVolumeSpecName: "utilities") pod "81050223-a913-4bca-847c-ecd842dee920" (UID: "81050223-a913-4bca-847c-ecd842dee920"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.499991 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81050223-a913-4bca-847c-ecd842dee920-kube-api-access-jwnd9" (OuterVolumeSpecName: "kube-api-access-jwnd9") pod "81050223-a913-4bca-847c-ecd842dee920" (UID: "81050223-a913-4bca-847c-ecd842dee920"). InnerVolumeSpecName "kube-api-access-jwnd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.529759 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81050223-a913-4bca-847c-ecd842dee920" (UID: "81050223-a913-4bca-847c-ecd842dee920"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.586370 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.586423 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81050223-a913-4bca-847c-ecd842dee920-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.586440 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwnd9\" (UniqueName: \"kubernetes.io/projected/81050223-a913-4bca-847c-ecd842dee920-kube-api-access-jwnd9\") on node \"crc\" DevicePath \"\"" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.819258 4995 generic.go:334] "Generic (PLEG): container finished" podID="81050223-a913-4bca-847c-ecd842dee920" containerID="330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff" exitCode=0 Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.819295 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerDied","Data":"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff"} Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.819322 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c4dmz" event={"ID":"81050223-a913-4bca-847c-ecd842dee920","Type":"ContainerDied","Data":"091112da8c16e9204b6b3a351e4dd440b8d1f22ca04aef1c8649dcba1b587d89"} Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.819340 4995 scope.go:117] "RemoveContainer" containerID="330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.819398 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c4dmz" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.846543 4995 scope.go:117] "RemoveContainer" containerID="03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.876886 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c4dmz"] Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.879733 4995 scope.go:117] "RemoveContainer" containerID="28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.890520 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c4dmz"] Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.947571 4995 scope.go:117] "RemoveContainer" containerID="330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff" Jan 20 19:07:05 crc kubenswrapper[4995]: E0120 19:07:05.948099 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff\": container with ID starting with 330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff not found: ID does not exist" containerID="330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.948143 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff"} err="failed to get container status \"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff\": rpc error: code = NotFound desc = could not find container \"330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff\": container with ID starting with 330875bab515f04c759c7f848ae755521105089389553c9cb3eb1cc123abd1ff not found: ID does not exist" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.948168 4995 scope.go:117] "RemoveContainer" containerID="03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627" Jan 20 19:07:05 crc kubenswrapper[4995]: E0120 19:07:05.948607 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627\": container with ID starting with 03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627 not found: ID does not exist" containerID="03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.948637 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627"} err="failed to get container status \"03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627\": rpc error: code = NotFound desc = could not find container \"03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627\": container with ID starting with 03194344b2dda1e3bce70d0d84ee47af85a02ad09920f7ca440cfecb60f5a627 not found: ID does not exist" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.948696 4995 scope.go:117] "RemoveContainer" containerID="28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6" Jan 20 19:07:05 crc kubenswrapper[4995]: E0120 19:07:05.949160 4995 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6\": container with ID starting with 28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6 not found: ID does not exist" containerID="28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6" Jan 20 19:07:05 crc kubenswrapper[4995]: I0120 19:07:05.949212 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6"} err="failed to get container status \"28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6\": rpc error: code = NotFound desc = could not find container \"28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6\": container with ID starting with 28acd50065c7cd3d017105971b9106ba4d91202ae03294b68c1a1eb69fcaccb6 not found: ID does not exist" Jan 20 19:07:06 crc kubenswrapper[4995]: I0120 19:07:06.007418 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81050223-a913-4bca-847c-ecd842dee920" path="/var/lib/kubelet/pods/81050223-a913-4bca-847c-ecd842dee920/volumes" Jan 20 19:07:15 crc kubenswrapper[4995]: I0120 19:07:15.989790 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:07:15 crc kubenswrapper[4995]: E0120 19:07:15.991491 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:07:28 crc kubenswrapper[4995]: I0120 19:07:28.997841 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:07:29 crc kubenswrapper[4995]: E0120 19:07:28.998946 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:07:40 crc kubenswrapper[4995]: I0120 19:07:40.989409 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:07:40 crc kubenswrapper[4995]: E0120 19:07:40.990461 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:07:52 crc kubenswrapper[4995]: I0120 19:07:52.004443 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:07:52 crc kubenswrapper[4995]: E0120 19:07:52.005114 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:08:05 crc kubenswrapper[4995]: I0120 19:08:05.990129 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:08:05 crc kubenswrapper[4995]: E0120 19:08:05.991124 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:08:18 crc kubenswrapper[4995]: I0120 19:08:18.990714 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:08:18 crc kubenswrapper[4995]: E0120 19:08:18.991766 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:08:30 crc kubenswrapper[4995]: I0120 19:08:30.990114 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:08:30 crc kubenswrapper[4995]: E0120 19:08:30.990961 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:08:42 crc kubenswrapper[4995]: I0120 19:08:42.002795 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:08:42 crc kubenswrapper[4995]: E0120 19:08:42.003693 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:08:56 crc kubenswrapper[4995]: I0120 19:08:56.989954 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:08:56 crc kubenswrapper[4995]: E0120 19:08:56.992399 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:09:09 crc kubenswrapper[4995]: I0120 19:09:09.989310 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa" Jan 20 19:09:11 crc kubenswrapper[4995]: I0120 19:09:11.152897 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"9b3df3037ad00ce1efb8d2afaf5739c242409e0f273959e9f48e9ba61a0ff72a"} Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.321876 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hmlsn"] Jan 20 19:10:25 crc kubenswrapper[4995]: E0120 19:10:25.323920 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="registry-server" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.323952 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="registry-server" Jan 20 19:10:25 crc kubenswrapper[4995]: E0120 19:10:25.323976 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="extract-content" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.323984 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="extract-content" Jan 20 19:10:25 crc kubenswrapper[4995]: E0120 19:10:25.323994 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="extract-utilities" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.324003 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="extract-utilities" Jan 20 19:10:25 crc kubenswrapper[4995]: E0120 19:10:25.324015 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="extract-content" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.324022 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="extract-content" Jan 20 19:10:25 crc kubenswrapper[4995]: E0120 19:10:25.324036 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="registry-server" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.324043 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="registry-server" Jan 20 19:10:25 crc kubenswrapper[4995]: E0120 19:10:25.324062 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="extract-utilities" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.324069 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="extract-utilities" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.324390 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b47bf1-ca49-4df7-bc73-9ebc09bd9fc3" containerName="registry-server" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 
19:10:25.324414 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="81050223-a913-4bca-847c-ecd842dee920" containerName="registry-server" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.327261 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.335769 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hmlsn"] Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.397149 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-utilities\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.397250 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wdvr\" (UniqueName: \"kubernetes.io/projected/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-kube-api-access-2wdvr\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.397481 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-catalog-content\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.499255 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-catalog-content\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.499445 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-utilities\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.499513 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wdvr\" (UniqueName: \"kubernetes.io/projected/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-kube-api-access-2wdvr\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.500436 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-catalog-content\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.500717 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-utilities\") pod 
\"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.537154 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wdvr\" (UniqueName: \"kubernetes.io/projected/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-kube-api-access-2wdvr\") pod \"redhat-operators-hmlsn\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:25 crc kubenswrapper[4995]: I0120 19:10:25.649453 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:26 crc kubenswrapper[4995]: I0120 19:10:26.133729 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hmlsn"] Jan 20 19:10:26 crc kubenswrapper[4995]: I0120 19:10:26.952106 4995 generic.go:334] "Generic (PLEG): container finished" podID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerID="8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a" exitCode=0 Jan 20 19:10:26 crc kubenswrapper[4995]: I0120 19:10:26.952250 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerDied","Data":"8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a"} Jan 20 19:10:26 crc kubenswrapper[4995]: I0120 19:10:26.954059 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerStarted","Data":"561c374b6bfec971f038cf26c164bfa3023653878205bf3ba351f4a0b512ddda"} Jan 20 19:10:28 crc kubenswrapper[4995]: I0120 19:10:28.979165 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerStarted","Data":"42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8"} Jan 20 19:10:31 crc kubenswrapper[4995]: I0120 19:10:31.006099 4995 generic.go:334] "Generic (PLEG): container finished" podID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerID="42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8" exitCode=0 Jan 20 19:10:31 crc kubenswrapper[4995]: I0120 19:10:31.006184 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerDied","Data":"42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8"} Jan 20 19:10:33 crc kubenswrapper[4995]: I0120 19:10:33.030839 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerStarted","Data":"1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102"} Jan 20 19:10:33 crc kubenswrapper[4995]: I0120 19:10:33.053951 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hmlsn" podStartSLOduration=2.372004163 podStartE2EDuration="8.053933975s" podCreationTimestamp="2026-01-20 19:10:25 +0000 UTC" firstStartedPulling="2026-01-20 19:10:26.954330581 +0000 UTC m=+9545.198935387" lastFinishedPulling="2026-01-20 19:10:32.636260393 +0000 UTC m=+9550.880865199" observedRunningTime="2026-01-20 19:10:33.047674387 +0000 UTC m=+9551.292279193" 
watchObservedRunningTime="2026-01-20 19:10:33.053933975 +0000 UTC m=+9551.298538781" Jan 20 19:10:35 crc kubenswrapper[4995]: I0120 19:10:35.650834 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:35 crc kubenswrapper[4995]: I0120 19:10:35.651103 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:36 crc kubenswrapper[4995]: I0120 19:10:36.699384 4995 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hmlsn" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="registry-server" probeResult="failure" output=< Jan 20 19:10:36 crc kubenswrapper[4995]: timeout: failed to connect service ":50051" within 1s Jan 20 19:10:36 crc kubenswrapper[4995]: > Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.693283 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c5tfz"] Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.696258 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.726884 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c5tfz"] Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.850212 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk28n\" (UniqueName: \"kubernetes.io/projected/37bca79e-97de-4301-92c8-a0c1266e8fed-kube-api-access-nk28n\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.850268 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-catalog-content\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.850296 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-utilities\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.951709 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk28n\" (UniqueName: \"kubernetes.io/projected/37bca79e-97de-4301-92c8-a0c1266e8fed-kube-api-access-nk28n\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.951770 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-catalog-content\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.951811 4995 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-utilities\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.952266 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-catalog-content\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.952393 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-utilities\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:40 crc kubenswrapper[4995]: I0120 19:10:40.981259 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk28n\" (UniqueName: \"kubernetes.io/projected/37bca79e-97de-4301-92c8-a0c1266e8fed-kube-api-access-nk28n\") pod \"redhat-marketplace-c5tfz\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") " pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:41 crc kubenswrapper[4995]: I0120 19:10:41.027567 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c5tfz" Jan 20 19:10:41 crc kubenswrapper[4995]: I0120 19:10:41.570214 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c5tfz"] Jan 20 19:10:41 crc kubenswrapper[4995]: W0120 19:10:41.571792 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37bca79e_97de_4301_92c8_a0c1266e8fed.slice/crio-917c1ad5959be9efc124b7518512cf571bbd2e1a41c3b4cbb4dd077aa21820d5 WatchSource:0}: Error finding container 917c1ad5959be9efc124b7518512cf571bbd2e1a41c3b4cbb4dd077aa21820d5: Status 404 returned error can't find the container with id 917c1ad5959be9efc124b7518512cf571bbd2e1a41c3b4cbb4dd077aa21820d5 Jan 20 19:10:42 crc kubenswrapper[4995]: I0120 19:10:42.137674 4995 generic.go:334] "Generic (PLEG): container finished" podID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerID="82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228" exitCode=0 Jan 20 19:10:42 crc kubenswrapper[4995]: I0120 19:10:42.138024 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerDied","Data":"82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228"} Jan 20 19:10:42 crc kubenswrapper[4995]: I0120 19:10:42.138059 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerStarted","Data":"917c1ad5959be9efc124b7518512cf571bbd2e1a41c3b4cbb4dd077aa21820d5"} Jan 20 19:10:43 crc kubenswrapper[4995]: I0120 19:10:43.152665 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" 
event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerStarted","Data":"9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218"} Jan 20 19:10:44 crc kubenswrapper[4995]: I0120 19:10:44.166357 4995 generic.go:334] "Generic (PLEG): container finished" podID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerID="9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218" exitCode=0 Jan 20 19:10:44 crc kubenswrapper[4995]: I0120 19:10:44.166542 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerDied","Data":"9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218"} Jan 20 19:10:45 crc kubenswrapper[4995]: I0120 19:10:45.179298 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerStarted","Data":"ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b"} Jan 20 19:10:45 crc kubenswrapper[4995]: I0120 19:10:45.202981 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c5tfz" podStartSLOduration=2.5053865760000003 podStartE2EDuration="5.202965055s" podCreationTimestamp="2026-01-20 19:10:40 +0000 UTC" firstStartedPulling="2026-01-20 19:10:42.141271343 +0000 UTC m=+9560.385876149" lastFinishedPulling="2026-01-20 19:10:44.838849812 +0000 UTC m=+9563.083454628" observedRunningTime="2026-01-20 19:10:45.19904884 +0000 UTC m=+9563.443653646" watchObservedRunningTime="2026-01-20 19:10:45.202965055 +0000 UTC m=+9563.447569861" Jan 20 19:10:46 crc kubenswrapper[4995]: I0120 19:10:46.473052 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:46 crc kubenswrapper[4995]: I0120 19:10:46.529304 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.066707 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hmlsn"] Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.208527 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hmlsn" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="registry-server" containerID="cri-o://1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102" gracePeriod=2 Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.734593 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.838377 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-catalog-content\") pod \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.838650 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wdvr\" (UniqueName: \"kubernetes.io/projected/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-kube-api-access-2wdvr\") pod \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.838801 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-utilities\") pod \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\" (UID: \"8bbfebf9-3af1-469c-b603-dfcd8207ec3b\") " Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.839594 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-utilities" (OuterVolumeSpecName: "utilities") pod "8bbfebf9-3af1-469c-b603-dfcd8207ec3b" (UID: "8bbfebf9-3af1-469c-b603-dfcd8207ec3b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.840353 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.844822 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-kube-api-access-2wdvr" (OuterVolumeSpecName: "kube-api-access-2wdvr") pod "8bbfebf9-3af1-469c-b603-dfcd8207ec3b" (UID: "8bbfebf9-3af1-469c-b603-dfcd8207ec3b"). InnerVolumeSpecName "kube-api-access-2wdvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.942307 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wdvr\" (UniqueName: \"kubernetes.io/projected/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-kube-api-access-2wdvr\") on node \"crc\" DevicePath \"\"" Jan 20 19:10:48 crc kubenswrapper[4995]: I0120 19:10:48.956575 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8bbfebf9-3af1-469c-b603-dfcd8207ec3b" (UID: "8bbfebf9-3af1-469c-b603-dfcd8207ec3b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.043469 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bbfebf9-3af1-469c-b603-dfcd8207ec3b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.226329 4995 generic.go:334] "Generic (PLEG): container finished" podID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerID="1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102" exitCode=0 Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.226372 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerDied","Data":"1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102"} Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.226410 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hmlsn" event={"ID":"8bbfebf9-3af1-469c-b603-dfcd8207ec3b","Type":"ContainerDied","Data":"561c374b6bfec971f038cf26c164bfa3023653878205bf3ba351f4a0b512ddda"} Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.226429 4995 scope.go:117] "RemoveContainer" containerID="1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.226423 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hmlsn" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.245502 4995 scope.go:117] "RemoveContainer" containerID="42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.275049 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hmlsn"] Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.283321 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hmlsn"] Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.288547 4995 scope.go:117] "RemoveContainer" containerID="8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.339690 4995 scope.go:117] "RemoveContainer" containerID="1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102" Jan 20 19:10:49 crc kubenswrapper[4995]: E0120 19:10:49.341481 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102\": container with ID starting with 1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102 not found: ID does not exist" containerID="1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102" Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.341520 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102"} err="failed to get container status \"1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102\": rpc error: code = NotFound desc = could not find container \"1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102\": container with ID starting with 1bf20d7271872fa5bd054c02ee107f59d6151231dd4f24a958f01ab7fe409102 not found: ID does not exist" Jan 20 19:10:49 crc 
kubenswrapper[4995]: I0120 19:10:49.341546 4995 scope.go:117] "RemoveContainer" containerID="42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8"
Jan 20 19:10:49 crc kubenswrapper[4995]: E0120 19:10:49.346932 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8\": container with ID starting with 42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8 not found: ID does not exist" containerID="42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8"
Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.346962 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8"} err="failed to get container status \"42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8\": rpc error: code = NotFound desc = could not find container \"42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8\": container with ID starting with 42f0445d593a3454c139cabb7cf3b7e3b339ba4ec497a6afa31c6c6d6215b8e8 not found: ID does not exist"
Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.346980 4995 scope.go:117] "RemoveContainer" containerID="8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a"
Jan 20 19:10:49 crc kubenswrapper[4995]: E0120 19:10:49.347573 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a\": container with ID starting with 8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a not found: ID does not exist" containerID="8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a"
Jan 20 19:10:49 crc kubenswrapper[4995]: I0120 19:10:49.347602 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a"} err="failed to get container status \"8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a\": rpc error: code = NotFound desc = could not find container \"8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a\": container with ID starting with 8d23450d55318a4db73eadd614ed08cf5d36fdfc145983328f919284566e012a not found: ID does not exist"
Jan 20 19:10:50 crc kubenswrapper[4995]: I0120 19:10:50.015547 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" path="/var/lib/kubelet/pods/8bbfebf9-3af1-469c-b603-dfcd8207ec3b/volumes"
Jan 20 19:10:51 crc kubenswrapper[4995]: I0120 19:10:51.028163 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c5tfz"
Jan 20 19:10:51 crc kubenswrapper[4995]: I0120 19:10:51.028466 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c5tfz"
Jan 20 19:10:51 crc kubenswrapper[4995]: I0120 19:10:51.091505 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c5tfz"
Jan 20 19:10:51 crc kubenswrapper[4995]: I0120 19:10:51.304765 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c5tfz"
Jan 20 19:10:52 crc kubenswrapper[4995]: I0120 19:10:52.279143 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c5tfz"]
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.268591 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-c5tfz" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="registry-server" containerID="cri-o://ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b" gracePeriod=2
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.685111 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c5tfz"
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.739777 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-catalog-content\") pod \"37bca79e-97de-4301-92c8-a0c1266e8fed\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") "
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.739822 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-utilities\") pod \"37bca79e-97de-4301-92c8-a0c1266e8fed\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") "
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.739850 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk28n\" (UniqueName: \"kubernetes.io/projected/37bca79e-97de-4301-92c8-a0c1266e8fed-kube-api-access-nk28n\") pod \"37bca79e-97de-4301-92c8-a0c1266e8fed\" (UID: \"37bca79e-97de-4301-92c8-a0c1266e8fed\") "
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.740865 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-utilities" (OuterVolumeSpecName: "utilities") pod "37bca79e-97de-4301-92c8-a0c1266e8fed" (UID: "37bca79e-97de-4301-92c8-a0c1266e8fed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.741522 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.753195 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37bca79e-97de-4301-92c8-a0c1266e8fed-kube-api-access-nk28n" (OuterVolumeSpecName: "kube-api-access-nk28n") pod "37bca79e-97de-4301-92c8-a0c1266e8fed" (UID: "37bca79e-97de-4301-92c8-a0c1266e8fed"). InnerVolumeSpecName "kube-api-access-nk28n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.770002 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37bca79e-97de-4301-92c8-a0c1266e8fed" (UID: "37bca79e-97de-4301-92c8-a0c1266e8fed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.843194 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bca79e-97de-4301-92c8-a0c1266e8fed-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:10:53 crc kubenswrapper[4995]: I0120 19:10:53.843227 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk28n\" (UniqueName: \"kubernetes.io/projected/37bca79e-97de-4301-92c8-a0c1266e8fed-kube-api-access-nk28n\") on node \"crc\" DevicePath \"\""
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.277893 4995 generic.go:334] "Generic (PLEG): container finished" podID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerID="ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b" exitCode=0
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.277961 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c5tfz"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.277980 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerDied","Data":"ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b"}
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.278366 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c5tfz" event={"ID":"37bca79e-97de-4301-92c8-a0c1266e8fed","Type":"ContainerDied","Data":"917c1ad5959be9efc124b7518512cf571bbd2e1a41c3b4cbb4dd077aa21820d5"}
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.278391 4995 scope.go:117] "RemoveContainer" containerID="ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.302314 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-c5tfz"]
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.307542 4995 scope.go:117] "RemoveContainer" containerID="9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.313915 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-c5tfz"]
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.326796 4995 scope.go:117] "RemoveContainer" containerID="82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.379717 4995 scope.go:117] "RemoveContainer" containerID="ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b"
Jan 20 19:10:54 crc kubenswrapper[4995]: E0120 19:10:54.380185 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b\": container with ID starting with ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b not found: ID does not exist" containerID="ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.380227 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b"} err="failed to get container status \"ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b\": rpc error: code = NotFound desc = could not find container \"ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b\": container with ID starting with ca29e88de433dc6fc338066c9ace4f21bee6eec39b6c0a8566f5dc93a527138b not found: ID does not exist"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.380253 4995 scope.go:117] "RemoveContainer" containerID="9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218"
Jan 20 19:10:54 crc kubenswrapper[4995]: E0120 19:10:54.380620 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218\": container with ID starting with 9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218 not found: ID does not exist" containerID="9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.380686 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218"} err="failed to get container status \"9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218\": rpc error: code = NotFound desc = could not find container \"9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218\": container with ID starting with 9d922ec7f22476301835a154ba02698e4b6031789e97455e1329ba95395a2218 not found: ID does not exist"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.380716 4995 scope.go:117] "RemoveContainer" containerID="82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228"
Jan 20 19:10:54 crc kubenswrapper[4995]: E0120 19:10:54.381204 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228\": container with ID starting with 82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228 not found: ID does not exist" containerID="82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228"
Jan 20 19:10:54 crc kubenswrapper[4995]: I0120 19:10:54.381233 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228"} err="failed to get container status \"82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228\": rpc error: code = NotFound desc = could not find container \"82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228\": container with ID starting with 82f6095f825da5c581d2b7f6eba3aed97645ae068eb4da951152af6bb2745228 not found: ID does not exist"
Jan 20 19:10:56 crc kubenswrapper[4995]: I0120 19:10:56.002187 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" path="/var/lib/kubelet/pods/37bca79e-97de-4301-92c8-a0c1266e8fed/volumes"
Jan 20 19:11:30 crc kubenswrapper[4995]: I0120 19:11:30.571854 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:11:30 crc kubenswrapper[4995]: I0120 19:11:30.572485 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:12:00 crc kubenswrapper[4995]: I0120 19:12:00.571768 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:12:00 crc kubenswrapper[4995]: I0120 19:12:00.572312 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:12:30 crc kubenswrapper[4995]: I0120 19:12:30.571614 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:12:30 crc kubenswrapper[4995]: I0120 19:12:30.572299 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:12:30 crc kubenswrapper[4995]: I0120 19:12:30.572370 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 19:12:30 crc kubenswrapper[4995]: I0120 19:12:30.573381 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b3df3037ad00ce1efb8d2afaf5739c242409e0f273959e9f48e9ba61a0ff72a"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 19:12:30 crc kubenswrapper[4995]: I0120 19:12:30.573481 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://9b3df3037ad00ce1efb8d2afaf5739c242409e0f273959e9f48e9ba61a0ff72a" gracePeriod=600
Jan 20 19:12:31 crc kubenswrapper[4995]: I0120 19:12:31.320364 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="9b3df3037ad00ce1efb8d2afaf5739c242409e0f273959e9f48e9ba61a0ff72a" exitCode=0
Jan 20 19:12:31 crc kubenswrapper[4995]: I0120 19:12:31.320452 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"9b3df3037ad00ce1efb8d2afaf5739c242409e0f273959e9f48e9ba61a0ff72a"}
Jan 20 19:12:31 crc kubenswrapper[4995]: I0120 19:12:31.320716 4995 scope.go:117] "RemoveContainer" containerID="1407ea91bb82eece6a0225c3671872a3c2d042d89f1c69cbc364c83614fe61fa"
Jan 20 19:12:32 crc kubenswrapper[4995]: I0120 19:12:32.337000 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"}
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.155281 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"]
Jan 20 19:15:00 crc kubenswrapper[4995]: E0120 19:15:00.156288 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="extract-utilities"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156304 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="extract-utilities"
Jan 20 19:15:00 crc kubenswrapper[4995]: E0120 19:15:00.156321 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="extract-utilities"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156329 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="extract-utilities"
Jan 20 19:15:00 crc kubenswrapper[4995]: E0120 19:15:00.156341 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="registry-server"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156348 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="registry-server"
Jan 20 19:15:00 crc kubenswrapper[4995]: E0120 19:15:00.156365 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="registry-server"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156372 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="registry-server"
Jan 20 19:15:00 crc kubenswrapper[4995]: E0120 19:15:00.156398 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="extract-content"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156405 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="extract-content"
Jan 20 19:15:00 crc kubenswrapper[4995]: E0120 19:15:00.156425 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="extract-content"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156432 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="extract-content"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156664 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="37bca79e-97de-4301-92c8-a0c1266e8fed" containerName="registry-server"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.156706 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bbfebf9-3af1-469c-b603-dfcd8207ec3b" containerName="registry-server"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.157601 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.160250 4995 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.160504 4995 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.166188 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"]
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.351823 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77596219-89ca-42ab-9dda-d4b20ccb8855-config-volume\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.351875 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sdmq\" (UniqueName: \"kubernetes.io/projected/77596219-89ca-42ab-9dda-d4b20ccb8855-kube-api-access-2sdmq\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.351939 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77596219-89ca-42ab-9dda-d4b20ccb8855-secret-volume\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.453433 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77596219-89ca-42ab-9dda-d4b20ccb8855-config-volume\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.453487 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sdmq\" (UniqueName: \"kubernetes.io/projected/77596219-89ca-42ab-9dda-d4b20ccb8855-kube-api-access-2sdmq\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.453553 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77596219-89ca-42ab-9dda-d4b20ccb8855-secret-volume\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.454407 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77596219-89ca-42ab-9dda-d4b20ccb8855-config-volume\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.479272 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77596219-89ca-42ab-9dda-d4b20ccb8855-secret-volume\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.482468 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sdmq\" (UniqueName: \"kubernetes.io/projected/77596219-89ca-42ab-9dda-d4b20ccb8855-kube-api-access-2sdmq\") pod \"collect-profiles-29482275-sl2nt\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.486605 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.572140 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.572480 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.917340 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"]
Jan 20 19:15:00 crc kubenswrapper[4995]: I0120 19:15:00.940539 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt" event={"ID":"77596219-89ca-42ab-9dda-d4b20ccb8855","Type":"ContainerStarted","Data":"aad7eecc16b0bdd8d077e51caf047bc6b57af541084610aa998edce9c26954c4"}
Jan 20 19:15:01 crc kubenswrapper[4995]: I0120 19:15:01.952791 4995 generic.go:334] "Generic (PLEG): container finished" podID="77596219-89ca-42ab-9dda-d4b20ccb8855" containerID="ec5bd7eaf2ae30024eb8f1ec4974acf04248b81d06d8d9c1300b1d20142c1733" exitCode=0
Jan 20 19:15:01 crc kubenswrapper[4995]: I0120 19:15:01.953114 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt" event={"ID":"77596219-89ca-42ab-9dda-d4b20ccb8855","Type":"ContainerDied","Data":"ec5bd7eaf2ae30024eb8f1ec4974acf04248b81d06d8d9c1300b1d20142c1733"}
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.284861 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.411933 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77596219-89ca-42ab-9dda-d4b20ccb8855-config-volume\") pod \"77596219-89ca-42ab-9dda-d4b20ccb8855\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") "
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.411984 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77596219-89ca-42ab-9dda-d4b20ccb8855-secret-volume\") pod \"77596219-89ca-42ab-9dda-d4b20ccb8855\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") "
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.412221 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sdmq\" (UniqueName: \"kubernetes.io/projected/77596219-89ca-42ab-9dda-d4b20ccb8855-kube-api-access-2sdmq\") pod \"77596219-89ca-42ab-9dda-d4b20ccb8855\" (UID: \"77596219-89ca-42ab-9dda-d4b20ccb8855\") "
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.412697 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77596219-89ca-42ab-9dda-d4b20ccb8855-config-volume" (OuterVolumeSpecName: "config-volume") pod "77596219-89ca-42ab-9dda-d4b20ccb8855" (UID: "77596219-89ca-42ab-9dda-d4b20ccb8855"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.418066 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77596219-89ca-42ab-9dda-d4b20ccb8855-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "77596219-89ca-42ab-9dda-d4b20ccb8855" (UID: "77596219-89ca-42ab-9dda-d4b20ccb8855"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.430830 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77596219-89ca-42ab-9dda-d4b20ccb8855-kube-api-access-2sdmq" (OuterVolumeSpecName: "kube-api-access-2sdmq") pod "77596219-89ca-42ab-9dda-d4b20ccb8855" (UID: "77596219-89ca-42ab-9dda-d4b20ccb8855"). InnerVolumeSpecName "kube-api-access-2sdmq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.515432 4995 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/77596219-89ca-42ab-9dda-d4b20ccb8855-config-volume\") on node \"crc\" DevicePath \"\""
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.515487 4995 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/77596219-89ca-42ab-9dda-d4b20ccb8855-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.515499 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sdmq\" (UniqueName: \"kubernetes.io/projected/77596219-89ca-42ab-9dda-d4b20ccb8855-kube-api-access-2sdmq\") on node \"crc\" DevicePath \"\""
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.976821 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt" event={"ID":"77596219-89ca-42ab-9dda-d4b20ccb8855","Type":"ContainerDied","Data":"aad7eecc16b0bdd8d077e51caf047bc6b57af541084610aa998edce9c26954c4"}
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.977336 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aad7eecc16b0bdd8d077e51caf047bc6b57af541084610aa998edce9c26954c4"
Jan 20 19:15:03 crc kubenswrapper[4995]: I0120 19:15:03.976889 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482275-sl2nt"
Jan 20 19:15:04 crc kubenswrapper[4995]: I0120 19:15:04.395632 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8"]
Jan 20 19:15:04 crc kubenswrapper[4995]: I0120 19:15:04.405520 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482230-h5fw8"]
Jan 20 19:15:06 crc kubenswrapper[4995]: I0120 19:15:06.004746 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16764a57-e02b-45c0-8fa1-cd0b409f447d" path="/var/lib/kubelet/pods/16764a57-e02b-45c0-8fa1-cd0b409f447d/volumes"
Jan 20 19:15:30 crc kubenswrapper[4995]: I0120 19:15:30.571393 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:15:30 crc kubenswrapper[4995]: I0120 19:15:30.571895 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:15:38 crc kubenswrapper[4995]: I0120 19:15:38.177291 4995 scope.go:117] "RemoveContainer" containerID="80810eb478f48ccd0a81dc32bff651616666a395b80601ed383a969db4fa3629"
Jan 20 19:16:00 crc kubenswrapper[4995]: I0120 19:16:00.571314 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:16:00 crc kubenswrapper[4995]: I0120 19:16:00.571949 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:16:00 crc kubenswrapper[4995]: I0120 19:16:00.572000 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 19:16:00 crc kubenswrapper[4995]: I0120 19:16:00.572904 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 19:16:00 crc kubenswrapper[4995]: I0120 19:16:00.572966 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" gracePeriod=600
Jan 20 19:16:00 crc kubenswrapper[4995]: E0120 19:16:00.712786 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:16:01 crc kubenswrapper[4995]: I0120 19:16:01.561328 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" exitCode=0
Jan 20 19:16:01 crc kubenswrapper[4995]: I0120 19:16:01.561432 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"}
Jan 20 19:16:01 crc kubenswrapper[4995]: I0120 19:16:01.561771 4995 scope.go:117] "RemoveContainer" containerID="9b3df3037ad00ce1efb8d2afaf5739c242409e0f273959e9f48e9ba61a0ff72a"
Jan 20 19:16:01 crc kubenswrapper[4995]: I0120 19:16:01.563039 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:16:01 crc kubenswrapper[4995]: E0120 19:16:01.563749 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:16:13 crc kubenswrapper[4995]: I0120 19:16:13.991430 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:16:13 crc kubenswrapper[4995]: E0120 19:16:13.992311 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:16:28 crc kubenswrapper[4995]: I0120 19:16:28.989193 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:16:28 crc kubenswrapper[4995]: E0120 19:16:28.990191 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:16:39 crc kubenswrapper[4995]: I0120 19:16:39.992792 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:16:39 crc kubenswrapper[4995]: E0120 19:16:39.994185 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:16:52 crc kubenswrapper[4995]: I0120 19:16:52.990190 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:16:52 crc kubenswrapper[4995]: E0120 19:16:52.993255 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.424631 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wrmfs"]
Jan 20 19:16:55 crc kubenswrapper[4995]: E0120 19:16:55.425909 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77596219-89ca-42ab-9dda-d4b20ccb8855" containerName="collect-profiles"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.425940 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="77596219-89ca-42ab-9dda-d4b20ccb8855" containerName="collect-profiles"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.426216 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="77596219-89ca-42ab-9dda-d4b20ccb8855" containerName="collect-profiles"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.429314 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.444024 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wrmfs"]
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.480339 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-utilities\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.480405 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-catalog-content\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.480454 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxgrg\" (UniqueName: \"kubernetes.io/projected/66ca92ce-4755-40ea-b285-c25d76d501c6-kube-api-access-dxgrg\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.582902 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-catalog-content\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.582984 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxgrg\" (UniqueName: \"kubernetes.io/projected/66ca92ce-4755-40ea-b285-c25d76d501c6-kube-api-access-dxgrg\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.583357 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-utilities\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.583738 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-catalog-content\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.583785 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-utilities\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.611568 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxgrg\" (UniqueName: \"kubernetes.io/projected/66ca92ce-4755-40ea-b285-c25d76d501c6-kube-api-access-dxgrg\") pod \"certified-operators-wrmfs\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") " pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:55 crc kubenswrapper[4995]: I0120 19:16:55.765618 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.049246 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-g8z5v"]
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.055051 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.076813 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g8z5v"]
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.093721 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-utilities\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.093793 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nh8h9\" (UniqueName: \"kubernetes.io/projected/98c71f5d-1781-45f9-83ca-70703f10d66d-kube-api-access-nh8h9\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.093822 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-catalog-content\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.198290 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-utilities\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.198392 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nh8h9\" (UniqueName: \"kubernetes.io/projected/98c71f5d-1781-45f9-83ca-70703f10d66d-kube-api-access-nh8h9\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.198438 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-catalog-content\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.199124 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-catalog-content\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.199289 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-utilities\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.223699 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nh8h9\" (UniqueName: \"kubernetes.io/projected/98c71f5d-1781-45f9-83ca-70703f10d66d-kube-api-access-nh8h9\") pod \"community-operators-g8z5v\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") " pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.403698 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:16:56 crc kubenswrapper[4995]: I0120 19:16:56.455035 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wrmfs"]
Jan 20 19:16:56 crc kubenswrapper[4995]: W0120 19:16:56.842233 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66ca92ce_4755_40ea_b285_c25d76d501c6.slice/crio-b1c8e5bbb8fefb4653f332aa2b14295618d7586f53d3ac15807a37e8f3be6ca7 WatchSource:0}: Error finding container b1c8e5bbb8fefb4653f332aa2b14295618d7586f53d3ac15807a37e8f3be6ca7: Status 404 returned error can't find the container with id b1c8e5bbb8fefb4653f332aa2b14295618d7586f53d3ac15807a37e8f3be6ca7
Jan 20 19:16:57 crc kubenswrapper[4995]: I0120 19:16:57.203116 4995 generic.go:334] "Generic (PLEG): container finished" podID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerID="d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690" exitCode=0
Jan 20 19:16:57 crc kubenswrapper[4995]: I0120 19:16:57.203224 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wrmfs" event={"ID":"66ca92ce-4755-40ea-b285-c25d76d501c6","Type":"ContainerDied","Data":"d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690"}
Jan 20 19:16:57 crc kubenswrapper[4995]: I0120 19:16:57.203581 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wrmfs" event={"ID":"66ca92ce-4755-40ea-b285-c25d76d501c6","Type":"ContainerStarted","Data":"b1c8e5bbb8fefb4653f332aa2b14295618d7586f53d3ac15807a37e8f3be6ca7"}
Jan 20 19:16:57 crc kubenswrapper[4995]: I0120 19:16:57.206905 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 20 19:16:57 crc kubenswrapper[4995]: W0120 19:16:57.341049 4995 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98c71f5d_1781_45f9_83ca_70703f10d66d.slice/crio-d34e9c0f10a738b68ed792e77367e04f9edc6a6c5e5faaf700a33c76898080fe WatchSource:0}: Error finding container d34e9c0f10a738b68ed792e77367e04f9edc6a6c5e5faaf700a33c76898080fe: Status 404 returned error can't find the container with id d34e9c0f10a738b68ed792e77367e04f9edc6a6c5e5faaf700a33c76898080fe
Jan 20 19:16:57 crc kubenswrapper[4995]: I0120 19:16:57.352346 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-g8z5v"]
Jan 20 19:16:58 crc kubenswrapper[4995]: I0120 19:16:58.215674 4995 generic.go:334] "Generic (PLEG): container finished" podID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerID="29778e3fa999c9e76891d13962e02020c2993b9cf6421621f8b6811fe411fe7c" exitCode=0
Jan 20 19:16:58 crc kubenswrapper[4995]: I0120 19:16:58.216224 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerDied","Data":"29778e3fa999c9e76891d13962e02020c2993b9cf6421621f8b6811fe411fe7c"}
Jan 20 19:16:58 crc kubenswrapper[4995]: I0120 19:16:58.216251 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerStarted","Data":"d34e9c0f10a738b68ed792e77367e04f9edc6a6c5e5faaf700a33c76898080fe"}
Jan 20 19:16:59 crc kubenswrapper[4995]: I0120 19:16:59.230200 4995 generic.go:334] "Generic (PLEG): container finished" podID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerID="1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4" exitCode=0
Jan 20 19:16:59 crc kubenswrapper[4995]: I0120 19:16:59.230452 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wrmfs" event={"ID":"66ca92ce-4755-40ea-b285-c25d76d501c6","Type":"ContainerDied","Data":"1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4"}
Jan 20 19:16:59 crc kubenswrapper[4995]: I0120 19:16:59.235359 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerStarted","Data":"b3dc4dd272f5bd7a91de27686dfe84b303cc603097535cfb6f58867b6873401c"}
Jan 20 19:17:00 crc kubenswrapper[4995]: I0120 19:17:00.245903 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wrmfs" event={"ID":"66ca92ce-4755-40ea-b285-c25d76d501c6","Type":"ContainerStarted","Data":"0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243"}
Jan 20 19:17:00 crc kubenswrapper[4995]: I0120 19:17:00.248613 4995 generic.go:334] "Generic (PLEG): container finished" podID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerID="b3dc4dd272f5bd7a91de27686dfe84b303cc603097535cfb6f58867b6873401c" exitCode=0
Jan 20 19:17:00 crc kubenswrapper[4995]: I0120 19:17:00.248686 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerDied","Data":"b3dc4dd272f5bd7a91de27686dfe84b303cc603097535cfb6f58867b6873401c"}
Jan 20 19:17:00 crc kubenswrapper[4995]: I0120 19:17:00.272072 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wrmfs" podStartSLOduration=2.770312969 podStartE2EDuration="5.272047215s" podCreationTimestamp="2026-01-20 19:16:55 +0000 UTC" firstStartedPulling="2026-01-20 19:16:57.20646287 +0000 UTC m=+9935.451067676" lastFinishedPulling="2026-01-20 19:16:59.708197116 +0000 UTC m=+9937.952801922" observedRunningTime="2026-01-20 19:17:00.267719359 +0000 UTC m=+9938.512324165" watchObservedRunningTime="2026-01-20 19:17:00.272047215 +0000 UTC m=+9938.516652041"
Jan 20 19:17:01 crc kubenswrapper[4995]: I0120 19:17:01.261279 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerStarted","Data":"aa346bd864e825fb692e0e5ce78bf088f241ac55f63fadbcd177df77ac2ee3e0"}
Jan 20 19:17:01 crc kubenswrapper[4995]: I0120 19:17:01.287423 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-g8z5v" podStartSLOduration=2.833748221 podStartE2EDuration="5.287403566s" podCreationTimestamp="2026-01-20 19:16:56 +0000 UTC" firstStartedPulling="2026-01-20 19:16:58.218208103 +0000 UTC m=+9936.462812899" lastFinishedPulling="2026-01-20 19:17:00.671863438 +0000 UTC m=+9938.916468244" observedRunningTime="2026-01-20 19:17:01.284027685 +0000 UTC m=+9939.528632491" watchObservedRunningTime="2026-01-20 19:17:01.287403566 +0000 UTC m=+9939.532008372"
Jan 20 19:17:05 crc kubenswrapper[4995]: I0120 19:17:05.767426 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:17:05 crc kubenswrapper[4995]: I0120 19:17:05.767734 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:17:05 crc kubenswrapper[4995]: I0120 19:17:05.833379 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:17:06 crc kubenswrapper[4995]: I0120 19:17:06.375517 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:17:06 crc kubenswrapper[4995]: I0120 19:17:06.404666 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:17:06 crc kubenswrapper[4995]: I0120 19:17:06.405125 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:17:06 crc kubenswrapper[4995]: I0120 19:17:06.433660 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wrmfs"]
Jan 20 19:17:06 crc kubenswrapper[4995]: I0120 19:17:06.460510 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:17:06 crc kubenswrapper[4995]: I0120 19:17:06.989963 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:17:06 crc kubenswrapper[4995]: E0120 19:17:06.990542 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:17:07 crc kubenswrapper[4995]: I0120 19:17:07.401551 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:17:08 crc kubenswrapper[4995]: I0120 19:17:08.344386 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wrmfs" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="registry-server" containerID="cri-o://0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243" gracePeriod=2
Jan 20 19:17:08 crc kubenswrapper[4995]: I0120 19:17:08.491680 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g8z5v"]
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.025908 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.216133 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-catalog-content\") pod \"66ca92ce-4755-40ea-b285-c25d76d501c6\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") "
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.216316 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-utilities\") pod \"66ca92ce-4755-40ea-b285-c25d76d501c6\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") "
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.216490 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxgrg\" (UniqueName: \"kubernetes.io/projected/66ca92ce-4755-40ea-b285-c25d76d501c6-kube-api-access-dxgrg\") pod \"66ca92ce-4755-40ea-b285-c25d76d501c6\" (UID: \"66ca92ce-4755-40ea-b285-c25d76d501c6\") "
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.218507 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-utilities" (OuterVolumeSpecName: "utilities") pod "66ca92ce-4755-40ea-b285-c25d76d501c6" (UID: "66ca92ce-4755-40ea-b285-c25d76d501c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.228007 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66ca92ce-4755-40ea-b285-c25d76d501c6-kube-api-access-dxgrg" (OuterVolumeSpecName: "kube-api-access-dxgrg") pod "66ca92ce-4755-40ea-b285-c25d76d501c6" (UID: "66ca92ce-4755-40ea-b285-c25d76d501c6"). InnerVolumeSpecName "kube-api-access-dxgrg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.277065 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66ca92ce-4755-40ea-b285-c25d76d501c6" (UID: "66ca92ce-4755-40ea-b285-c25d76d501c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.318327 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.318362 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxgrg\" (UniqueName: \"kubernetes.io/projected/66ca92ce-4755-40ea-b285-c25d76d501c6-kube-api-access-dxgrg\") on node \"crc\" DevicePath \"\""
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.318372 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ca92ce-4755-40ea-b285-c25d76d501c6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.356972 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wrmfs"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.357092 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wrmfs" event={"ID":"66ca92ce-4755-40ea-b285-c25d76d501c6","Type":"ContainerDied","Data":"0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243"}
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.357163 4995 scope.go:117] "RemoveContainer" containerID="0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.356896 4995 generic.go:334] "Generic (PLEG): container finished" podID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerID="0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243" exitCode=0
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.362307 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wrmfs" event={"ID":"66ca92ce-4755-40ea-b285-c25d76d501c6","Type":"ContainerDied","Data":"b1c8e5bbb8fefb4653f332aa2b14295618d7586f53d3ac15807a37e8f3be6ca7"}
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.376254 4995 scope.go:117] "RemoveContainer" containerID="1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.399232 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wrmfs"]
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.407587 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wrmfs"]
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.410365 4995 scope.go:117] "RemoveContainer" containerID="d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.430520 4995 scope.go:117] "RemoveContainer" containerID="0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243"
Jan 20 19:17:09 crc kubenswrapper[4995]: E0120 19:17:09.430964 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243\": container with ID starting with 0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243 not found: ID does not exist" containerID="0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.431011 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243"} err="failed to get container status \"0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243\": rpc error: code = NotFound desc = could not find container \"0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243\": container with ID starting with 0dbf2e2b921707d5b6c3ac18b774a5c4b5c50cac428255f3a91179eb7cc1e243 not found: ID does not exist"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.431037 4995 scope.go:117] "RemoveContainer" containerID="1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4"
Jan 20 19:17:09 crc kubenswrapper[4995]: E0120 19:17:09.431367 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4\": container with ID starting with 1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4 not found: ID does not exist" containerID="1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.431414 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4"} err="failed to get container status \"1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4\": rpc error: code = NotFound desc = could not find container \"1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4\": container with ID starting with 1159c5c732cc7498ba6f2a213d9195528cf20f5a3d4e61e0a2f8a4fde4810ee4 not found: ID does not exist"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.431442 4995 scope.go:117] "RemoveContainer" containerID="d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690"
Jan 20 19:17:09 crc kubenswrapper[4995]: E0120 19:17:09.431947 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690\": container with ID starting with d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690 not found: ID does not exist" containerID="d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690"
Jan 20 19:17:09 crc kubenswrapper[4995]: I0120 19:17:09.431977 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690"} err="failed to get container status \"d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690\": rpc error: code = NotFound desc = could not find container \"d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690\": container with ID starting with d5eed881086f9f3af9c0dcd1f7ea607755fe6f406e80b1dccea2853e2c09b690 not found: ID does not exist"
Jan 20 19:17:10 crc kubenswrapper[4995]: I0120 19:17:10.005923 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" path="/var/lib/kubelet/pods/66ca92ce-4755-40ea-b285-c25d76d501c6/volumes"
Jan 20 19:17:10 crc kubenswrapper[4995]: I0120 19:17:10.372339 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-g8z5v" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="registry-server" containerID="cri-o://aa346bd864e825fb692e0e5ce78bf088f241ac55f63fadbcd177df77ac2ee3e0" gracePeriod=2
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.381702 4995 generic.go:334] "Generic (PLEG): container finished" podID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerID="aa346bd864e825fb692e0e5ce78bf088f241ac55f63fadbcd177df77ac2ee3e0" exitCode=0
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.381778 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerDied","Data":"aa346bd864e825fb692e0e5ce78bf088f241ac55f63fadbcd177df77ac2ee3e0"}
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.382044 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-g8z5v" event={"ID":"98c71f5d-1781-45f9-83ca-70703f10d66d","Type":"ContainerDied","Data":"d34e9c0f10a738b68ed792e77367e04f9edc6a6c5e5faaf700a33c76898080fe"}
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.382059 4995 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d34e9c0f10a738b68ed792e77367e04f9edc6a6c5e5faaf700a33c76898080fe"
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.416365 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g8z5v"
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.568583 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nh8h9\" (UniqueName: \"kubernetes.io/projected/98c71f5d-1781-45f9-83ca-70703f10d66d-kube-api-access-nh8h9\") pod \"98c71f5d-1781-45f9-83ca-70703f10d66d\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") "
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.569054 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-catalog-content\") pod \"98c71f5d-1781-45f9-83ca-70703f10d66d\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") "
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.569287 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-utilities\") pod \"98c71f5d-1781-45f9-83ca-70703f10d66d\" (UID: \"98c71f5d-1781-45f9-83ca-70703f10d66d\") "
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.570656 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-utilities" (OuterVolumeSpecName: "utilities") pod "98c71f5d-1781-45f9-83ca-70703f10d66d" (UID: "98c71f5d-1781-45f9-83ca-70703f10d66d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.574380 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98c71f5d-1781-45f9-83ca-70703f10d66d-kube-api-access-nh8h9" (OuterVolumeSpecName: "kube-api-access-nh8h9") pod "98c71f5d-1781-45f9-83ca-70703f10d66d" (UID: "98c71f5d-1781-45f9-83ca-70703f10d66d"). InnerVolumeSpecName "kube-api-access-nh8h9".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.623049 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98c71f5d-1781-45f9-83ca-70703f10d66d" (UID: "98c71f5d-1781-45f9-83ca-70703f10d66d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.670757 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.670790 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nh8h9\" (UniqueName: \"kubernetes.io/projected/98c71f5d-1781-45f9-83ca-70703f10d66d-kube-api-access-nh8h9\") on node \"crc\" DevicePath \"\"" Jan 20 19:17:11 crc kubenswrapper[4995]: I0120 19:17:11.670800 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98c71f5d-1781-45f9-83ca-70703f10d66d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:17:12 crc kubenswrapper[4995]: I0120 19:17:12.389412 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-g8z5v" Jan 20 19:17:12 crc kubenswrapper[4995]: I0120 19:17:12.414867 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-g8z5v"] Jan 20 19:17:12 crc kubenswrapper[4995]: I0120 19:17:12.424263 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-g8z5v"] Jan 20 19:17:14 crc kubenswrapper[4995]: I0120 19:17:14.007962 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" path="/var/lib/kubelet/pods/98c71f5d-1781-45f9-83ca-70703f10d66d/volumes" Jan 20 19:17:20 crc kubenswrapper[4995]: I0120 19:17:20.990246 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:17:20 crc kubenswrapper[4995]: E0120 19:17:20.991354 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:17:34 crc kubenswrapper[4995]: I0120 19:17:34.988945 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:17:34 crc kubenswrapper[4995]: E0120 19:17:34.989564 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:17:46 crc kubenswrapper[4995]: I0120 19:17:46.989659 4995 scope.go:117] "RemoveContainer" 
containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:17:46 crc kubenswrapper[4995]: E0120 19:17:46.990719 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:17:58 crc kubenswrapper[4995]: I0120 19:17:58.989659 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:17:58 crc kubenswrapper[4995]: E0120 19:17:58.990780 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:18:12 crc kubenswrapper[4995]: I0120 19:18:12.989317 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:18:12 crc kubenswrapper[4995]: E0120 19:18:12.990217 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:18:25 crc kubenswrapper[4995]: I0120 19:18:25.989635 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:18:25 crc kubenswrapper[4995]: E0120 19:18:25.990353 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:18:39 crc kubenswrapper[4995]: I0120 19:18:39.989997 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:18:39 crc kubenswrapper[4995]: E0120 19:18:39.990805 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:18:54 crc kubenswrapper[4995]: I0120 19:18:54.990263 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:18:54 crc kubenswrapper[4995]: E0120 19:18:54.991198 4995 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:19:05 crc kubenswrapper[4995]: I0120 19:19:05.990231 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:19:05 crc kubenswrapper[4995]: E0120 19:19:05.991234 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:19:16 crc kubenswrapper[4995]: I0120 19:19:16.989946 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:19:16 crc kubenswrapper[4995]: E0120 19:19:16.990835 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:19:27 crc kubenswrapper[4995]: I0120 19:19:27.990506 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:19:27 crc kubenswrapper[4995]: E0120 19:19:27.995738 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:19:42 crc kubenswrapper[4995]: I0120 19:19:42.989775 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:19:42 crc kubenswrapper[4995]: E0120 19:19:42.990769 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:19:54 crc kubenswrapper[4995]: I0120 19:19:54.991303 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:19:54 crc kubenswrapper[4995]: E0120 19:19:54.992675 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:20:07 crc kubenswrapper[4995]: I0120 19:20:07.989688 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:20:07 crc kubenswrapper[4995]: E0120 19:20:07.990389 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:20:22 crc kubenswrapper[4995]: I0120 19:20:21.999390 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:20:22 crc kubenswrapper[4995]: E0120 19:20:22.000566 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.035401 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8ttw9"] Jan 20 19:20:31 crc kubenswrapper[4995]: E0120 19:20:31.037660 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="registry-server" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.037755 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="registry-server" Jan 20 19:20:31 crc kubenswrapper[4995]: E0120 19:20:31.037846 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="extract-utilities" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.037917 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="extract-utilities" Jan 20 19:20:31 crc kubenswrapper[4995]: E0120 19:20:31.038004 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="extract-utilities" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.038091 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="extract-utilities" Jan 20 19:20:31 crc kubenswrapper[4995]: E0120 19:20:31.038196 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="extract-content" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.038272 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="extract-content" Jan 20 19:20:31 crc kubenswrapper[4995]: E0120 19:20:31.038356 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="registry-server" Jan 20 19:20:31 crc 
kubenswrapper[4995]: I0120 19:20:31.038432 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="registry-server" Jan 20 19:20:31 crc kubenswrapper[4995]: E0120 19:20:31.038510 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="extract-content" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.038583 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="extract-content" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.038923 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="98c71f5d-1781-45f9-83ca-70703f10d66d" containerName="registry-server" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.039029 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="66ca92ce-4755-40ea-b285-c25d76d501c6" containerName="registry-server" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.040868 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.047650 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8ttw9"] Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.164629 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbwpc\" (UniqueName: \"kubernetes.io/projected/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-kube-api-access-xbwpc\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.165228 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-catalog-content\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.165376 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-utilities\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.267650 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-catalog-content\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.267745 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-utilities\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.267835 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbwpc\" (UniqueName: 
\"kubernetes.io/projected/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-kube-api-access-xbwpc\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.268673 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-catalog-content\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.268673 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-utilities\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.289920 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbwpc\" (UniqueName: \"kubernetes.io/projected/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-kube-api-access-xbwpc\") pod \"redhat-operators-8ttw9\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") " pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:31 crc kubenswrapper[4995]: I0120 19:20:31.362627 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ttw9" Jan 20 19:20:32 crc kubenswrapper[4995]: I0120 19:20:32.007770 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8ttw9"] Jan 20 19:20:32 crc kubenswrapper[4995]: I0120 19:20:32.363150 4995 generic.go:334] "Generic (PLEG): container finished" podID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerID="0c2830442653766d3ada974bbf7c16e1c71f14133852816677ece3f18ff72913" exitCode=0 Jan 20 19:20:32 crc kubenswrapper[4995]: I0120 19:20:32.363196 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ttw9" event={"ID":"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6","Type":"ContainerDied","Data":"0c2830442653766d3ada974bbf7c16e1c71f14133852816677ece3f18ff72913"} Jan 20 19:20:32 crc kubenswrapper[4995]: I0120 19:20:32.363229 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ttw9" event={"ID":"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6","Type":"ContainerStarted","Data":"1214472e2906a2132749036bd0add0c4c29c558fe66c6ce51c4a2a23d1753894"} Jan 20 19:20:33 crc kubenswrapper[4995]: I0120 19:20:33.994610 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341" Jan 20 19:20:33 crc kubenswrapper[4995]: E0120 19:20:33.996215 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" Jan 20 19:20:34 crc kubenswrapper[4995]: I0120 19:20:34.384775 4995 generic.go:334] "Generic (PLEG): container finished" podID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerID="89f0f2996fc4fcfa65e98994d84252b94563fa66f63a7cefecfe7c4a8778ba8d" exitCode=0 
Jan 20 19:20:34 crc kubenswrapper[4995]: I0120 19:20:34.384867 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ttw9" event={"ID":"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6","Type":"ContainerDied","Data":"89f0f2996fc4fcfa65e98994d84252b94563fa66f63a7cefecfe7c4a8778ba8d"}
Jan 20 19:20:35 crc kubenswrapper[4995]: I0120 19:20:35.421553 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ttw9" event={"ID":"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6","Type":"ContainerStarted","Data":"31ca3502a27cf9fad3a93be441cc7c386d167f65158175412ce40bf4bad23541"}
Jan 20 19:20:35 crc kubenswrapper[4995]: I0120 19:20:35.449910 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8ttw9" podStartSLOduration=1.950197902 podStartE2EDuration="4.449890144s" podCreationTimestamp="2026-01-20 19:20:31 +0000 UTC" firstStartedPulling="2026-01-20 19:20:32.365321618 +0000 UTC m=+10150.609926424" lastFinishedPulling="2026-01-20 19:20:34.86501386 +0000 UTC m=+10153.109618666" observedRunningTime="2026-01-20 19:20:35.445387664 +0000 UTC m=+10153.689992470" watchObservedRunningTime="2026-01-20 19:20:35.449890144 +0000 UTC m=+10153.694494960"
Jan 20 19:20:41 crc kubenswrapper[4995]: I0120 19:20:41.364382 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8ttw9"
Jan 20 19:20:41 crc kubenswrapper[4995]: I0120 19:20:41.364903 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8ttw9"
Jan 20 19:20:41 crc kubenswrapper[4995]: I0120 19:20:41.423835 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8ttw9"
Jan 20 19:20:41 crc kubenswrapper[4995]: I0120 19:20:41.522042 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8ttw9"
Jan 20 19:20:41 crc kubenswrapper[4995]: I0120 19:20:41.658424 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8ttw9"]
Jan 20 19:20:43 crc kubenswrapper[4995]: I0120 19:20:43.487803 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8ttw9" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="registry-server" containerID="cri-o://31ca3502a27cf9fad3a93be441cc7c386d167f65158175412ce40bf4bad23541" gracePeriod=2
Jan 20 19:20:45 crc kubenswrapper[4995]: I0120 19:20:45.512790 4995 generic.go:334] "Generic (PLEG): container finished" podID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerID="31ca3502a27cf9fad3a93be441cc7c386d167f65158175412ce40bf4bad23541" exitCode=0
Jan 20 19:20:45 crc kubenswrapper[4995]: I0120 19:20:45.512866 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ttw9" event={"ID":"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6","Type":"ContainerDied","Data":"31ca3502a27cf9fad3a93be441cc7c386d167f65158175412ce40bf4bad23541"}
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.056999 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ttw9"
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.180899 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-utilities\") pod \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") "
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.181365 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-catalog-content\") pod \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") "
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.181474 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbwpc\" (UniqueName: \"kubernetes.io/projected/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-kube-api-access-xbwpc\") pod \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\" (UID: \"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6\") "
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.182095 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-utilities" (OuterVolumeSpecName: "utilities") pod "8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" (UID: "8047b25b-4fad-48fb-b790-9ef4b7bcb8d6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.187556 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-kube-api-access-xbwpc" (OuterVolumeSpecName: "kube-api-access-xbwpc") pod "8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" (UID: "8047b25b-4fad-48fb-b790-9ef4b7bcb8d6"). InnerVolumeSpecName "kube-api-access-xbwpc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.283398 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbwpc\" (UniqueName: \"kubernetes.io/projected/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-kube-api-access-xbwpc\") on node \"crc\" DevicePath \"\""
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.283638 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.306180 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" (UID: "8047b25b-4fad-48fb-b790-9ef4b7bcb8d6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.385184 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.527492 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8ttw9" event={"ID":"8047b25b-4fad-48fb-b790-9ef4b7bcb8d6","Type":"ContainerDied","Data":"1214472e2906a2132749036bd0add0c4c29c558fe66c6ce51c4a2a23d1753894"}
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.527571 4995 scope.go:117] "RemoveContainer" containerID="31ca3502a27cf9fad3a93be441cc7c386d167f65158175412ce40bf4bad23541"
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.528023 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8ttw9"
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.567786 4995 scope.go:117] "RemoveContainer" containerID="89f0f2996fc4fcfa65e98994d84252b94563fa66f63a7cefecfe7c4a8778ba8d"
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.574905 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8ttw9"]
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.594924 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8ttw9"]
Jan 20 19:20:46 crc kubenswrapper[4995]: I0120 19:20:46.604627 4995 scope.go:117] "RemoveContainer" containerID="0c2830442653766d3ada974bbf7c16e1c71f14133852816677ece3f18ff72913"
Jan 20 19:20:47 crc kubenswrapper[4995]: I0120 19:20:47.989312 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:20:47 crc kubenswrapper[4995]: E0120 19:20:47.989799 4995 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-ns9m2_openshift-machine-config-operator(80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a)\"" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a"
Jan 20 19:20:47 crc kubenswrapper[4995]: I0120 19:20:47.998978 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" path="/var/lib/kubelet/pods/8047b25b-4fad-48fb-b790-9ef4b7bcb8d6/volumes"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.740168 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sjpff"]
Jan 20 19:20:58 crc kubenswrapper[4995]: E0120 19:20:58.741720 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="extract-utilities"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.741743 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="extract-utilities"
Jan 20 19:20:58 crc kubenswrapper[4995]: E0120 19:20:58.741756 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="extract-content"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.741763 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="extract-content"
Jan 20 19:20:58 crc kubenswrapper[4995]: E0120 19:20:58.741772 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="registry-server"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.741779 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="registry-server"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.742304 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="8047b25b-4fad-48fb-b790-9ef4b7bcb8d6" containerName="registry-server"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.748017 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.785213 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjpff"]
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.881738 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89x4n\" (UniqueName: \"kubernetes.io/projected/cd7b4176-dc92-4ae8-9b57-75f800461bce-kube-api-access-89x4n\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.881835 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-catalog-content\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.882117 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-utilities\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.983667 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89x4n\" (UniqueName: \"kubernetes.io/projected/cd7b4176-dc92-4ae8-9b57-75f800461bce-kube-api-access-89x4n\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.983728 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-catalog-content\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.983793 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-utilities\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.984398 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-utilities\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:58 crc kubenswrapper[4995]: I0120 19:20:58.984400 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-catalog-content\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:59 crc kubenswrapper[4995]: I0120 19:20:59.018903 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89x4n\" (UniqueName: \"kubernetes.io/projected/cd7b4176-dc92-4ae8-9b57-75f800461bce-kube-api-access-89x4n\") pod \"redhat-marketplace-sjpff\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") " pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:59 crc kubenswrapper[4995]: I0120 19:20:59.082188 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:20:59 crc kubenswrapper[4995]: I0120 19:20:59.593559 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjpff"]
Jan 20 19:20:59 crc kubenswrapper[4995]: I0120 19:20:59.690866 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerStarted","Data":"2b6405d17a9197d062fe4be5203588bf96a86f7accb59ed7d30d138fe1f52cde"}
Jan 20 19:21:00 crc kubenswrapper[4995]: I0120 19:21:00.705568 4995 generic.go:334] "Generic (PLEG): container finished" podID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerID="f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d" exitCode=0
Jan 20 19:21:00 crc kubenswrapper[4995]: I0120 19:21:00.705626 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerDied","Data":"f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d"}
Jan 20 19:21:01 crc kubenswrapper[4995]: I0120 19:21:01.729779 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerStarted","Data":"b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834"}
Jan 20 19:21:02 crc kubenswrapper[4995]: I0120 19:21:02.744438 4995 generic.go:334] "Generic (PLEG): container finished" podID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerID="b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834" exitCode=0
Jan 20 19:21:02 crc kubenswrapper[4995]: I0120 19:21:02.744593 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerDied","Data":"b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834"}
Jan 20 19:21:02 crc kubenswrapper[4995]: I0120 19:21:02.990225 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:21:03 crc kubenswrapper[4995]: I0120 19:21:03.758360 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"7cabc77bb95aab9e063862d0faec936d6cc3548390cf6b714d010c108a355cba"}
Jan 20 19:21:03 crc kubenswrapper[4995]: I0120 19:21:03.764496 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerStarted","Data":"a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9"}
Jan 20 19:21:03 crc kubenswrapper[4995]: I0120 19:21:03.827504 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sjpff" podStartSLOduration=3.290151599 podStartE2EDuration="5.827482252s" podCreationTimestamp="2026-01-20 19:20:58 +0000 UTC" firstStartedPulling="2026-01-20 19:21:00.708565003 +0000 UTC m=+10178.953169809" lastFinishedPulling="2026-01-20 19:21:03.245895646 +0000 UTC m=+10181.490500462" observedRunningTime="2026-01-20 19:21:03.821575923 +0000 UTC m=+10182.066180729" watchObservedRunningTime="2026-01-20 19:21:03.827482252 +0000 UTC m=+10182.072087058"
Jan 20 19:21:09 crc kubenswrapper[4995]: I0120 19:21:09.083117 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:21:09 crc kubenswrapper[4995]: I0120 19:21:09.083958 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:21:09 crc kubenswrapper[4995]: I0120 19:21:09.154916 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:21:09 crc kubenswrapper[4995]: I0120 19:21:09.879507 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:21:09 crc kubenswrapper[4995]: I0120 19:21:09.946277 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjpff"]
Jan 20 19:21:11 crc kubenswrapper[4995]: I0120 19:21:11.843142 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sjpff" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="registry-server" containerID="cri-o://a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9" gracePeriod=2
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.259552 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.369421 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89x4n\" (UniqueName: \"kubernetes.io/projected/cd7b4176-dc92-4ae8-9b57-75f800461bce-kube-api-access-89x4n\") pod \"cd7b4176-dc92-4ae8-9b57-75f800461bce\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") "
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.369495 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-utilities\") pod \"cd7b4176-dc92-4ae8-9b57-75f800461bce\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") "
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.369653 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-catalog-content\") pod \"cd7b4176-dc92-4ae8-9b57-75f800461bce\" (UID: \"cd7b4176-dc92-4ae8-9b57-75f800461bce\") "
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.371978 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-utilities" (OuterVolumeSpecName: "utilities") pod "cd7b4176-dc92-4ae8-9b57-75f800461bce" (UID: "cd7b4176-dc92-4ae8-9b57-75f800461bce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.377966 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd7b4176-dc92-4ae8-9b57-75f800461bce-kube-api-access-89x4n" (OuterVolumeSpecName: "kube-api-access-89x4n") pod "cd7b4176-dc92-4ae8-9b57-75f800461bce" (UID: "cd7b4176-dc92-4ae8-9b57-75f800461bce"). InnerVolumeSpecName "kube-api-access-89x4n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.392677 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd7b4176-dc92-4ae8-9b57-75f800461bce" (UID: "cd7b4176-dc92-4ae8-9b57-75f800461bce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.471626 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.471810 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89x4n\" (UniqueName: \"kubernetes.io/projected/cd7b4176-dc92-4ae8-9b57-75f800461bce-kube-api-access-89x4n\") on node \"crc\" DevicePath \"\""
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.471903 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd7b4176-dc92-4ae8-9b57-75f800461bce-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.855893 4995 generic.go:334] "Generic (PLEG): container finished" podID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerID="a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9" exitCode=0
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.855938 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjpff"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.855956 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerDied","Data":"a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9"}
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.857222 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjpff" event={"ID":"cd7b4176-dc92-4ae8-9b57-75f800461bce","Type":"ContainerDied","Data":"2b6405d17a9197d062fe4be5203588bf96a86f7accb59ed7d30d138fe1f52cde"}
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.857265 4995 scope.go:117] "RemoveContainer" containerID="a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.894328 4995 scope.go:117] "RemoveContainer" containerID="b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.903820 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjpff"]
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.911870 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjpff"]
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.919524 4995 scope.go:117] "RemoveContainer" containerID="f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.961646 4995 scope.go:117] "RemoveContainer" containerID="a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9"
Jan 20 19:21:12 crc kubenswrapper[4995]: E0120 19:21:12.962213 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9\": container with ID starting with a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9 not found: ID does not exist" containerID="a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.962253 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9"} err="failed to get container status \"a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9\": rpc error: code = NotFound desc = could not find container \"a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9\": container with ID starting with a9719e05d52b5a7783a8ba29175a63bbd69fcb817f9b6d69b1182cc37d994ea9 not found: ID does not exist"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.962281 4995 scope.go:117] "RemoveContainer" containerID="b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834"
Jan 20 19:21:12 crc kubenswrapper[4995]: E0120 19:21:12.962661 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834\": container with ID starting with b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834 not found: ID does not exist" containerID="b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.962818 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834"} err="failed to get container status \"b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834\": rpc error: code = NotFound desc = could not find container \"b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834\": container with ID starting with b86620926efda508188232cad8b818931cbfcc2553b55c4ce4c312311be0b834 not found: ID does not exist"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.962886 4995 scope.go:117] "RemoveContainer" containerID="f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d"
Jan 20 19:21:12 crc kubenswrapper[4995]: E0120 19:21:12.963220 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d\": container with ID starting with f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d not found: ID does not exist" containerID="f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d"
Jan 20 19:21:12 crc kubenswrapper[4995]: I0120 19:21:12.963241 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d"} err="failed to get container status \"f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d\": rpc error: code = NotFound desc = could not find container \"f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d\": container with ID starting with f93b20a983b6401691dd045301a37e938f6e0a6c1ca0cf19c7fb81a6e2ed3a9d not found: ID does not exist"
Jan 20 19:21:14 crc kubenswrapper[4995]: I0120 19:21:14.005330 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" path="/var/lib/kubelet/pods/cd7b4176-dc92-4ae8-9b57-75f800461bce/volumes"
Jan 20 19:23:30 crc kubenswrapper[4995]: I0120 19:23:30.572023 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:23:30 crc kubenswrapper[4995]: I0120 19:23:30.572627 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:23:38 crc kubenswrapper[4995]: I0120 19:23:38.473201 4995 scope.go:117] "RemoveContainer" containerID="aa346bd864e825fb692e0e5ce78bf088f241ac55f63fadbcd177df77ac2ee3e0"
Jan 20 19:23:38 crc kubenswrapper[4995]: I0120 19:23:38.504545 4995 scope.go:117] "RemoveContainer" containerID="b3dc4dd272f5bd7a91de27686dfe84b303cc603097535cfb6f58867b6873401c"
Jan 20 19:23:38 crc kubenswrapper[4995]: I0120 19:23:38.533328 4995 scope.go:117] "RemoveContainer" containerID="29778e3fa999c9e76891d13962e02020c2993b9cf6421621f8b6811fe411fe7c"
Jan 20 19:24:00 crc kubenswrapper[4995]: I0120 19:24:00.571228 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:24:00 crc kubenswrapper[4995]: I0120 19:24:00.571854 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.571586 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.572203 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.572272 4995 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2"
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.573130 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7cabc77bb95aab9e063862d0faec936d6cc3548390cf6b714d010c108a355cba"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.573191 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://7cabc77bb95aab9e063862d0faec936d6cc3548390cf6b714d010c108a355cba" gracePeriod=600
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.861588 4995 generic.go:334] "Generic (PLEG): container finished" podID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerID="7cabc77bb95aab9e063862d0faec936d6cc3548390cf6b714d010c108a355cba" exitCode=0
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.861663 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerDied","Data":"7cabc77bb95aab9e063862d0faec936d6cc3548390cf6b714d010c108a355cba"}
Jan 20 19:24:30 crc kubenswrapper[4995]: I0120 19:24:30.862749 4995 scope.go:117] "RemoveContainer" containerID="7cceb16f67ebd760686e594bb544231c57b2bba48a8559abfd9d5e29c1bae341"
Jan 20 19:24:31 crc kubenswrapper[4995]: I0120 19:24:31.874972 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" event={"ID":"80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a","Type":"ContainerStarted","Data":"8dcde1e334307721c168e8086374ebb9fab702c0286ce6de91f30c8727cdca00"}
Jan 20 19:26:30 crc kubenswrapper[4995]: I0120 19:26:30.571928 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:26:30 crc kubenswrapper[4995]: I0120 19:26:30.572580 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.144312 4995 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8bqf5"]
Jan 20 19:27:00 crc kubenswrapper[4995]: E0120 19:27:00.145273 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="registry-server"
Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.145286 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="registry-server"
Jan 20 19:27:00 crc kubenswrapper[4995]: E0120 19:27:00.145297 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="extract-content"
Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.145303 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="extract-content"
Jan 20 19:27:00 crc kubenswrapper[4995]: E0120 19:27:00.145320 4995 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="extract-utilities"
Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.145326 4995 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="extract-utilities"
Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.145523 4995 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd7b4176-dc92-4ae8-9b57-75f800461bce" containerName="registry-server"
Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.146897 4995 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.153608 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8bqf5"] Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.236955 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-catalog-content\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.237051 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-utilities\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.237186 4995 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cpv4\" (UniqueName: \"kubernetes.io/projected/617947af-3be8-41c9-85f3-70c28a93c6eb-kube-api-access-7cpv4\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.338415 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cpv4\" (UniqueName: \"kubernetes.io/projected/617947af-3be8-41c9-85f3-70c28a93c6eb-kube-api-access-7cpv4\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.338565 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-catalog-content\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.338600 4995 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-utilities\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.339115 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-utilities\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.339391 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-catalog-content\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.571774 4995 patch_prober.go:28] 
interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:27:00 crc kubenswrapper[4995]: I0120 19:27:00.571832 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:27:01 crc kubenswrapper[4995]: I0120 19:27:01.030529 4995 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cpv4\" (UniqueName: \"kubernetes.io/projected/617947af-3be8-41c9-85f3-70c28a93c6eb-kube-api-access-7cpv4\") pod \"certified-operators-8bqf5\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:01 crc kubenswrapper[4995]: I0120 19:27:01.068214 4995 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:01 crc kubenswrapper[4995]: I0120 19:27:01.572789 4995 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8bqf5"] Jan 20 19:27:02 crc kubenswrapper[4995]: I0120 19:27:02.416265 4995 generic.go:334] "Generic (PLEG): container finished" podID="617947af-3be8-41c9-85f3-70c28a93c6eb" containerID="a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1" exitCode=0 Jan 20 19:27:02 crc kubenswrapper[4995]: I0120 19:27:02.416772 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerDied","Data":"a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1"} Jan 20 19:27:02 crc kubenswrapper[4995]: I0120 19:27:02.416853 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerStarted","Data":"62f868c9b6db5a2604895ebc4278dceccc8a198a7ad03d974392ab923128b875"} Jan 20 19:27:02 crc kubenswrapper[4995]: I0120 19:27:02.420503 4995 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 19:27:03 crc kubenswrapper[4995]: I0120 19:27:03.431001 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerStarted","Data":"3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f"} Jan 20 19:27:04 crc kubenswrapper[4995]: I0120 19:27:04.443208 4995 generic.go:334] "Generic (PLEG): container finished" podID="617947af-3be8-41c9-85f3-70c28a93c6eb" containerID="3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f" exitCode=0 Jan 20 19:27:04 crc kubenswrapper[4995]: I0120 19:27:04.443259 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerDied","Data":"3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f"} Jan 20 19:27:06 crc kubenswrapper[4995]: I0120 19:27:06.467574 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerStarted","Data":"cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95"} Jan 20 19:27:06 crc kubenswrapper[4995]: I0120 19:27:06.498256 4995 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8bqf5" podStartSLOduration=3.084009777 podStartE2EDuration="6.498234631s" podCreationTimestamp="2026-01-20 19:27:00 +0000 UTC" firstStartedPulling="2026-01-20 19:27:02.420260348 +0000 UTC m=+10540.664865154" lastFinishedPulling="2026-01-20 19:27:05.834485202 +0000 UTC m=+10544.079090008" observedRunningTime="2026-01-20 19:27:06.487186643 +0000 UTC m=+10544.731791479" watchObservedRunningTime="2026-01-20 19:27:06.498234631 +0000 UTC m=+10544.742839447" Jan 20 19:27:11 crc kubenswrapper[4995]: I0120 19:27:11.068898 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:11 crc kubenswrapper[4995]: I0120 19:27:11.069412 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:11 crc kubenswrapper[4995]: I0120 19:27:11.153101 4995 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:11 crc kubenswrapper[4995]: I0120 19:27:11.588640 4995 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:11 crc kubenswrapper[4995]: I0120 19:27:11.639535 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8bqf5"] Jan 20 19:27:13 crc kubenswrapper[4995]: I0120 19:27:13.534541 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8bqf5" podUID="617947af-3be8-41c9-85f3-70c28a93c6eb" containerName="registry-server" containerID="cri-o://cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95" gracePeriod=2 Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.052170 4995 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.123360 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cpv4\" (UniqueName: \"kubernetes.io/projected/617947af-3be8-41c9-85f3-70c28a93c6eb-kube-api-access-7cpv4\") pod \"617947af-3be8-41c9-85f3-70c28a93c6eb\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.123436 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-utilities\") pod \"617947af-3be8-41c9-85f3-70c28a93c6eb\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.123502 4995 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-catalog-content\") pod \"617947af-3be8-41c9-85f3-70c28a93c6eb\" (UID: \"617947af-3be8-41c9-85f3-70c28a93c6eb\") " Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.124495 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-utilities" (OuterVolumeSpecName: "utilities") pod "617947af-3be8-41c9-85f3-70c28a93c6eb" (UID: "617947af-3be8-41c9-85f3-70c28a93c6eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.129648 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/617947af-3be8-41c9-85f3-70c28a93c6eb-kube-api-access-7cpv4" (OuterVolumeSpecName: "kube-api-access-7cpv4") pod "617947af-3be8-41c9-85f3-70c28a93c6eb" (UID: "617947af-3be8-41c9-85f3-70c28a93c6eb"). InnerVolumeSpecName "kube-api-access-7cpv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.132411 4995 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cpv4\" (UniqueName: \"kubernetes.io/projected/617947af-3be8-41c9-85f3-70c28a93c6eb-kube-api-access-7cpv4\") on node \"crc\" DevicePath \"\"" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.132444 4995 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.170256 4995 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "617947af-3be8-41c9-85f3-70c28a93c6eb" (UID: "617947af-3be8-41c9-85f3-70c28a93c6eb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.234329 4995 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/617947af-3be8-41c9-85f3-70c28a93c6eb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.545955 4995 generic.go:334] "Generic (PLEG): container finished" podID="617947af-3be8-41c9-85f3-70c28a93c6eb" containerID="cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95" exitCode=0 Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.546000 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerDied","Data":"cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95"} Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.546025 4995 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8bqf5" event={"ID":"617947af-3be8-41c9-85f3-70c28a93c6eb","Type":"ContainerDied","Data":"62f868c9b6db5a2604895ebc4278dceccc8a198a7ad03d974392ab923128b875"} Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.546041 4995 scope.go:117] "RemoveContainer" containerID="cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.546043 4995 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8bqf5" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.571342 4995 scope.go:117] "RemoveContainer" containerID="3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.602713 4995 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8bqf5"] Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.625196 4995 scope.go:117] "RemoveContainer" containerID="a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.655879 4995 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8bqf5"] Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.659019 4995 scope.go:117] "RemoveContainer" containerID="cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95" Jan 20 19:27:14 crc kubenswrapper[4995]: E0120 19:27:14.659566 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95\": container with ID starting with cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95 not found: ID does not exist" containerID="cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.659608 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95"} err="failed to get container status \"cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95\": rpc error: code = NotFound desc = could not find container \"cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95\": container with ID starting with cd9197e0bf68c7eee3b74d3728c271ce76853aa15e8318b32d095c3f0b6d3c95 not found: ID does not exist" Jan 20 
19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.659633 4995 scope.go:117] "RemoveContainer" containerID="3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f" Jan 20 19:27:14 crc kubenswrapper[4995]: E0120 19:27:14.660056 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f\": container with ID starting with 3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f not found: ID does not exist" containerID="3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.660100 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f"} err="failed to get container status \"3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f\": rpc error: code = NotFound desc = could not find container \"3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f\": container with ID starting with 3f8b36de5644170625206329c2eef973a506577146fbef84ae683fa25da8ce8f not found: ID does not exist" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.660118 4995 scope.go:117] "RemoveContainer" containerID="a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1" Jan 20 19:27:14 crc kubenswrapper[4995]: E0120 19:27:14.664405 4995 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1\": container with ID starting with a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1 not found: ID does not exist" containerID="a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1" Jan 20 19:27:14 crc kubenswrapper[4995]: I0120 19:27:14.664449 4995 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1"} err="failed to get container status \"a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1\": rpc error: code = NotFound desc = could not find container \"a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1\": container with ID starting with a1722799eba3935bdf3779651c59fbdc5614143723bbccd64927e9bb366416f1 not found: ID does not exist" Jan 20 19:27:16 crc kubenswrapper[4995]: I0120 19:27:16.004405 4995 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="617947af-3be8-41c9-85f3-70c28a93c6eb" path="/var/lib/kubelet/pods/617947af-3be8-41c9-85f3-70c28a93c6eb/volumes" Jan 20 19:27:30 crc kubenswrapper[4995]: I0120 19:27:30.572122 4995 patch_prober.go:28] interesting pod/machine-config-daemon-ns9m2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:27:30 crc kubenswrapper[4995]: I0120 19:27:30.573105 4995 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:27:30 crc kubenswrapper[4995]: I0120 19:27:30.573157 4995 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" Jan 20 19:27:30 crc kubenswrapper[4995]: I0120 19:27:30.574067 4995 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8dcde1e334307721c168e8086374ebb9fab702c0286ce6de91f30c8727cdca00"} pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 19:27:30 crc kubenswrapper[4995]: I0120 19:27:30.574152 4995 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-ns9m2" podUID="80d6e58a-a0f6-4c31-b841-2c4fbaaf8f8a" containerName="machine-config-daemon" containerID="cri-o://8dcde1e334307721c168e8086374ebb9fab702c0286ce6de91f30c8727cdca00" gracePeriod=600 var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515133753453024456 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015133753454017374 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015133726346016517 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015133726347015470 5ustar corecore